From d60283394b4bfbab7f5b6c8e230b8c0d2c5d289a Mon Sep 17 00:00:00 2001
From: luanshaotong
Date: Mon, 14 Jul 2025 08:11:08 +0000
Subject: [PATCH 01/19] pass annotations

---
 container_opts.go                        | 18 +++++++++++++++++-
 pkg/cri/server/container_create.go       |  8 ++++++++
 pkg/cri/server/container_create_linux.go | 18 ++++++++++++++++++
 3 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/container_opts.go b/container_opts.go
index 4a937032f566..6d2cadaf502a 100644
--- a/container_opts.go
+++ b/container_opts.go
@@ -229,7 +229,23 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts
 		if err != nil {
 			return err
 		}
-		if _, err := s.Prepare(ctx, id, parent, opts...); err != nil {
+		base := snapshots.Info{}
+		for _, opt := range opts {
+			if err := opt(&base); err != nil {
+				return fmt.Errorf("error applying snapshot option: %w", err)
+			}
+		}
+		start_opts := []snapshots.Opt{}
+		// if sealos.io/devbox/use-limit is set, copy it to containerd.io/snapshot/new-layer-limit
+		if limit, ok := base.Labels["sealos.io/devbox/use-limit"]; ok {
+			if limit != "" {
+				start_opts = append(start_opts, snapshots.WithLabels(map[string]string{
+					"containerd.io/snapshot/new-layer-limit": limit,
+				}))
+			}
+		}
+		start_opts = append(start_opts, opts...)
+		if _, err := s.Prepare(ctx, id, parent, start_opts...); err != nil {
 			return err
 		}
 		c.SnapshotKey = id
diff --git a/pkg/cri/server/container_create.go b/pkg/cri/server/container_create.go
index 54eff77bd74b..2efd4fd0936b 100644
--- a/pkg/cri/server/container_create.go
+++ b/pkg/cri/server/container_create.go
@@ -189,6 +189,14 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
 		return nil, err
 	}
 
+	devboxOpt, err := devboxSnapshotterOpts(c.runtimeSnapshotter(ctx, ociRuntime), r.GetSandboxConfig())
+	if err != nil {
+		return nil, err
+	}
+	if devboxOpt != nil {
+		sOpts = append(sOpts, devboxOpt)
+	}
+
 	// Set snapshotter before any other options.
 	opts := []containerd.NewContainerOpts{
 		containerd.WithSnapshotter(c.runtimeSnapshotter(ctx, ociRuntime)),
diff --git a/pkg/cri/server/container_create_linux.go b/pkg/cri/server/container_create_linux.go
index 47922d5e6d02..684ef7126b8c 100644
--- a/pkg/cri/server/container_create_linux.go
+++ b/pkg/cri/server/container_create_linux.go
@@ -610,6 +610,24 @@ func generateUserString(username string, uid, gid *runtime.Int64Value) (string,
 	return userstr, nil
 }
 
+// devboxSnapshotterOpts returns devbox-specific snapshotter options for the rootfs snapshot
+func devboxSnapshotterOpts(snapshotterName string, config *runtime.PodSandboxConfig) (snapshots.Opt, error) {
+	fmt.Printf("devboxSnapshotterOpts: snapshotterName=%s, config=%+v\n", snapshotterName, config)
+	if snapshotterName != "sealos-devbox-snapshotter" {
+		return nil, nil
+	}
+	// add container annotations to snapshot labels
+	labels := make(map[string]string)
+	for k, v := range config.Annotations {
+		// if strings.HasPrefix(k, DevboxSnapshotLabelPrefix) {
+		labels[k] = v
+		fmt.Printf("devboxSnapshotterOpts: k=%s, v=%s\n", k, v)
+		// }
+	}
+	// labels["sealos.io/devbox/use-limit"] = "10Gi"
+	return snapshots.WithLabels(labels), nil
+}
+
 // use SEALOS_DEVBOX_UID to set the uid of the container
 // we don't use pod annotations or labels because it will cause circular dependency
 const devboxUidEnvKey = "devbox.sealos.io/uid"

From 2c76946fdc73e9d27e74af77df8a4ed4034769c4 Mon Sep 17 00:00:00 2001
From: luanshaotong
Date: Tue, 15 Jul 2025 06:24:18 +0000
Subject: [PATCH 02/19] add devbox snapshotter

---
 cmd/containerd/builtins/builtins_linux.go | 1 +
 go.mod | 41 +-
 go.sum | 196 ++-
 pkg/cri/server/container_create_linux.go | 4 +-
 snapshots/devbox/constants.go | 10 +
 snapshots/devbox/devbox.go | 959 +++++++++++
 snapshots/devbox/lvm/constants.go | 68 +
 snapshots/devbox/lvm/lvm.go | 1124 ++++++++++++
 snapshots/devbox/plugin/plugin.go | 82 +
 snapshots/devbox/storage/bolt.go | 846 +++++++++
 snapshots/devbox/storage/metastore.go | 157 ++
 .../emicklei/go-restful/v3/CHANGES.md | 5 +
 .../emicklei/go-restful/v3/README.md | 4 +
 .../emicklei/go-restful/v3/route_builder.go | 24 +-
 .../compress/flate/_gen/gen_inflate.go | 303 ----
 .../klauspost/compress/flate/deflate_test.go | 665 -------
 vendor/github.com/openebs/lvm-localpv/LICENSE | 201 +++
 .../pkg/apis/openebs.io/lvm/v1alpha1/doc.go | 21 +
 .../apis/openebs.io/lvm/v1alpha1/lvmnode.go | 125 ++
 .../openebs.io/lvm/v1alpha1/lvmsnapshot.go | 68 +
 .../apis/openebs.io/lvm/v1alpha1/lvmvolume.go | 123 ++
 .../apis/openebs.io/lvm/v1alpha1/register.go | 82 +
 .../lvm/v1alpha1/zz_generated.deepcopy.go | 319 ++++
 .../prometheus/client_model/go/metrics.pb.go | 1530 +++++++++++------
 .../prometheus/common/expfmt/decode.go | 5 +-
 .../prometheus/common/expfmt/encode.go | 13 +-
 .../prometheus/common/expfmt/expfmt.go | 26 +-
 .../prometheus/common/expfmt/text_parse.go | 2 +-
 vendor/golang.org/x/net/context/context.go | 112 +-
 vendor/golang.org/x/net/context/go17.go | 72 -
 vendor/golang.org/x/net/context/go19.go | 20 -
 vendor/golang.org/x/net/context/pre_go17.go | 300 ----
 vendor/golang.org/x/net/context/pre_go19.go | 109 --
 vendor/golang.org/x/net/http2/config.go | 2 +-
 vendor/golang.org/x/net/http2/config_go124.go | 2 +-
 vendor/golang.org/x/net/http2/frame.go | 11 +
 vendor/golang.org/x/net/http2/http2.go | 39 +-
 vendor/golang.org/x/net/http2/server.go | 130 +-
 vendor/golang.org/x/net/http2/transport.go | 347 +---
 vendor/golang.org/x/net/http2/write.go | 3 +-
 .../x/net/internal/httpcommon/ascii.go | 53 +
 .../httpcommon}/headermap.go
| 24 +- .../x/net/internal/httpcommon/request.go | 467 +++++ vendor/golang.org/x/net/proxy/per_host.go | 8 +- .../golang.org/x/net/websocket/websocket.go | 5 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 3 +- vendor/golang.org/x/sync/errgroup/go120.go | 13 - .../golang.org/x/sync/errgroup/pre_go120.go | 14 - vendor/golang.org/x/sys/cpu/cpu.go | 3 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 21 +- vendor/golang.org/x/sys/unix/auxv.go | 36 + .../golang.org/x/sys/unix/auxv_unsupported.go | 13 + .../x/sys/unix/syscall_dragonfly.go | 12 + .../golang.org/x/sys/unix/syscall_solaris.go | 87 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 20 +- .../x/sys/unix/zerrors_linux_386.go | 3 + .../x/sys/unix/zerrors_linux_amd64.go | 3 + .../x/sys/unix/zerrors_linux_arm.go | 3 + .../x/sys/unix/zerrors_linux_arm64.go | 4 + .../x/sys/unix/zerrors_linux_loong64.go | 3 + .../x/sys/unix/zerrors_linux_mips.go | 3 + .../x/sys/unix/zerrors_linux_mips64.go | 3 + .../x/sys/unix/zerrors_linux_mips64le.go | 3 + .../x/sys/unix/zerrors_linux_mipsle.go | 3 + .../x/sys/unix/zerrors_linux_ppc.go | 3 + .../x/sys/unix/zerrors_linux_ppc64.go | 3 + .../x/sys/unix/zerrors_linux_ppc64le.go | 3 + .../x/sys/unix/zerrors_linux_riscv64.go | 3 + .../x/sys/unix/zerrors_linux_s390x.go | 3 + .../x/sys/unix/zerrors_linux_sparc64.go | 3 + .../x/sys/unix/zsyscall_solaris_amd64.go | 114 ++ .../x/sys/unix/zsysnum_linux_386.go | 4 + .../x/sys/unix/zsysnum_linux_amd64.go | 4 + .../x/sys/unix/zsysnum_linux_arm.go | 4 + .../x/sys/unix/zsysnum_linux_arm64.go | 4 + .../x/sys/unix/zsysnum_linux_loong64.go | 4 + .../x/sys/unix/zsysnum_linux_mips.go | 4 + .../x/sys/unix/zsysnum_linux_mips64.go | 4 + .../x/sys/unix/zsysnum_linux_mips64le.go | 4 + .../x/sys/unix/zsysnum_linux_mipsle.go | 4 + .../x/sys/unix/zsysnum_linux_ppc.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 + .../x/sys/unix/zsysnum_linux_riscv64.go | 4 + .../x/sys/unix/zsysnum_linux_s390x.go | 4 + .../x/sys/unix/zsysnum_linux_sparc64.go | 4 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 6 +- .../golang.org/x/sys/windows/dll_windows.go | 11 +- vendor/golang.org/x/time/AUTHORS | 3 - vendor/golang.org/x/time/CONTRIBUTORS | 3 - vendor/golang.org/x/time/rate/rate.go | 123 +- vendor/golang.org/x/time/rate/sometimes.go | 67 + vendor/k8s.io/klog/v2/format.go | 65 + .../klog/v2/internal/serialize/keyvalues.go | 47 +- vendor/k8s.io/klog/v2/k8s_references.go | 12 +- vendor/k8s.io/klog/v2/klog.go | 13 + vendor/modules.txt | 53 +- 97 files changed, 6865 insertions(+), 2688 deletions(-) create mode 100644 snapshots/devbox/constants.go create mode 100644 snapshots/devbox/devbox.go create mode 100644 snapshots/devbox/lvm/constants.go create mode 100644 snapshots/devbox/lvm/lvm.go create mode 100644 snapshots/devbox/plugin/plugin.go create mode 100644 snapshots/devbox/storage/bolt.go create mode 100644 snapshots/devbox/storage/metastore.go delete mode 100644 vendor/github.com/klauspost/compress/flate/_gen/gen_inflate.go delete mode 100644 vendor/github.com/klauspost/compress/flate/deflate_test.go create mode 100644 vendor/github.com/openebs/lvm-localpv/LICENSE create mode 100644 vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/doc.go create mode 100644 vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmnode.go create mode 100644 vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmsnapshot.go create mode 100644 
vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmvolume.go create mode 100644 vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/register.go create mode 100644 vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/golang.org/x/net/context/go19.go delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/net/internal/httpcommon/ascii.go rename vendor/golang.org/x/net/{http2 => internal/httpcommon}/headermap.go (74%) create mode 100644 vendor/golang.org/x/net/internal/httpcommon/request.go delete mode 100644 vendor/golang.org/x/sync/errgroup/go120.go delete mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go create mode 100644 vendor/golang.org/x/sys/unix/auxv.go create mode 100644 vendor/golang.org/x/sys/unix/auxv_unsupported.go delete mode 100644 vendor/golang.org/x/time/AUTHORS delete mode 100644 vendor/golang.org/x/time/CONTRIBUTORS create mode 100644 vendor/golang.org/x/time/rate/sometimes.go create mode 100644 vendor/k8s.io/klog/v2/format.go diff --git a/cmd/containerd/builtins/builtins_linux.go b/cmd/containerd/builtins/builtins_linux.go index 2970839ca02a..f69c0e0bef55 100644 --- a/cmd/containerd/builtins/builtins_linux.go +++ b/cmd/containerd/builtins/builtins_linux.go @@ -22,6 +22,7 @@ import ( _ "github.com/containerd/containerd/metrics/cgroups/v2" _ "github.com/containerd/containerd/runtime/v1/linux" _ "github.com/containerd/containerd/snapshots/blockfile/plugin" + _ "github.com/containerd/containerd/snapshots/devbox/plugin" _ "github.com/containerd/containerd/snapshots/native/plugin" _ "github.com/containerd/containerd/snapshots/overlay/plugin" ) diff --git a/go.mod b/go.mod index a7bb372ad04d..41678aba1c27 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/containerd/containerd -go 1.21 +go 1.23.0 + +toolchain go1.24.4 require ( dario.cat/mergo v1.0.0 @@ -33,7 +35,7 @@ require ( github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c github.com/docker/go-metrics v0.0.1 github.com/docker/go-units v0.5.0 - github.com/emicklei/go-restful/v3 v3.10.1 + github.com/emicklei/go-restful/v3 v3.10.2 github.com/fsnotify/fsnotify v1.6.0 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.4.0 @@ -54,7 +56,9 @@ require ( github.com/opencontainers/runtime-spec v1.1.0 github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 github.com/opencontainers/selinux v1.11.0 + github.com/openebs/lvm-localpv v1.7.0 github.com/pelletier/go-toml v1.9.5 + github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.16.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.8.4 @@ -70,21 +74,21 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 - golang.org/x/net v0.33.0 - golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 + golang.org/x/net v0.38.0 + golang.org/x/sync v0.12.0 + golang.org/x/sys v0.31.0 google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.35.2 - k8s.io/api v0.26.2 + k8s.io/api v0.27.2 k8s.io/apimachinery v0.27.4 k8s.io/apiserver v0.26.2 - k8s.io/client-go v0.26.2 + k8s.io/client-go 
v11.0.1-0.20190409021438-1a26190bd76a+incompatible k8s.io/component-base v0.26.2 k8s.io/cri-api v0.27.1 - k8s.io/klog/v2 v2.90.1 - k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 + k8s.io/klog/v2 v2.100.1 + k8s.io/utils v0.0.0-20230505201702-9f6742963106 tags.cncf.io/container-device-interface v0.8.1 ) @@ -117,10 +121,9 @@ require ( github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -131,12 +134,12 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/crypto v0.31.0 // indirect + golang.org/x/crypto v0.36.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/term v0.27.0 // indirect - golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -151,3 +154,9 @@ require ( // Workaround for indirect dependency no longer being available. 
// https://github.com/containerd/containerd/issues/9969 exclude github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f + +replace k8s.io/apimachinery v0.27.2 => k8s.io/apimachinery v0.24.17 + +replace k8s.io/client-go => k8s.io/client-go v0.26.2 + +replace k8s.io/api => k8s.io/api v0.26.2 diff --git a/go.sum b/go.sum index a80a1f3fcc5c..b83db85557fa 100644 --- a/go.sum +++ b/go.sum @@ -10,10 +10,17 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= @@ -23,9 +30,12 @@ cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -36,15 +46,6 @@ github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod 
h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -233,6 +234,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= @@ -267,8 +269,10 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -276,10 +280,10 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/envoyproxy/protoc-gen-validate v1.0.2 
h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -304,21 +308,25 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= -github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -356,6 +364,8 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -376,18 +386,22 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -396,11 +410,14 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= @@ -442,7 +459,7 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -489,6 +506,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= @@ -543,6 +561,7 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -558,6 +577,10 @@ github.com/onsi/ginkgo v1.16.4/go.mod 
h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -566,6 +589,11 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= +github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -599,6 +627,8 @@ github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3 github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/openebs/lvm-localpv v1.7.0 h1:fjqzMuAYJcV8gjzLPteXr3oiiVXuZvuI67fz20Ubn4k= +github.com/openebs/lvm-localpv v1.7.0/go.mod h1:/kaYdEZ/5wyRWwTJdrVzQu/u9iGuZ2U7msIl++XBp5o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -623,14 +653,14 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 
h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -679,6 +709,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -734,8 +765,11 @@ github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= @@ -751,6 +785,7 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= @@ -792,9 +827,10 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -825,7 +861,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= @@ -846,6 +884,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -857,23 +896,36 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL 
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= 
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -882,13 +934,15 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -933,9 +987,15 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -949,23 +1009,29 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -974,18 +1040,19 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text 
v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1020,11 +1087,24 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= @@ -1042,12 +1122,19 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= @@ -1070,9 +1157,20 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg= google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= @@ -1092,8 +1190,10 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= @@ -1110,6 +1210,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= google.golang.org/protobuf v1.35.2/go.mod 
h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -1118,6 +1220,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -1143,6 +1246,8 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= @@ -1154,16 +1259,16 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= k8s.io/apimachinery v0.27.4 h1:CdxflD4AF61yewuid0fLl6bM4a3q04jWel0IlP+aYjs= k8s.io/apimachinery v0.27.4/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.26.2 h1:Pk8lmX4G14hYqJd1poHGC08G03nIHVqdJMR0SD3IH3o= k8s.io/apiserver v0.26.2/go.mod h1:GHcozwXgXsPuOJ28EnQ/jXEM9QeG6HT22YxSNmpYNh8= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= @@ -1174,21 +1279,28 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.27.1 h1:KWO+U8MfI9drXB/P4oU9VchaWYOlwDglJZVHWMpTT3Q= k8s.io/cri-api 
v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRykOulHVHf0s0ZIFRjyt+UI= +k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk= -k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= +k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/pkg/cri/server/container_create_linux.go b/pkg/cri/server/container_create_linux.go index 684ef7126b8c..5b484b74b378 100644 --- a/pkg/cri/server/container_create_linux.go +++ b/pkg/cri/server/container_create_linux.go @@ -613,7 +613,7 @@ func generateUserString(username string, uid, gid *runtime.Int64Value) (string, // snapshotterOpts returns any Linux specific snapshotter options for the rootfs snapshot func 
devboxSnapshotterOpts(snapshotterName string, config *runtime.PodSandboxConfig) (snapshots.Opt, error) {
 	fmt.Printf("devboxSnapshotterOpts: snapshotterName=%s, config=%+v\n", snapshotterName, config)
-	if snapshotterName != "sealos-devbox-snapshotter" {
+	if snapshotterName != "devbox" {
 		return nil, nil
 	}
 	// add container annotations to snapshot labels
@@ -624,7 +624,7 @@ func devboxSnapshotterOpts(snapshotterName string, config *runtime.PodSandboxCon
 		fmt.Printf("devboxSnapshotterOpts: k=%s, v=%s\n", k, v)
 		// }
 	}
-	// labels["sealos.io/devbox/use-limit"] = "10Gi"
+	// labels["devbox.sealos.io/use-limit"] = "10Gi"
 	return snapshots.WithLabels(labels), nil
 }
 
diff --git a/snapshots/devbox/constants.go b/snapshots/devbox/constants.go
new file mode 100644
index 000000000000..fe983cfcde79
--- /dev/null
+++ b/snapshots/devbox/constants.go
@@ -0,0 +1,10 @@
+//go:build linux
+
+package devbox
+
+const (
+	// DefaultRootDir is the default storage root for this plugin.
+	DefaultRootDir = "/var/lib/containerd/io.sealos.labring.devbox"
+	// SocksFileName is the name of the socket file on which this plugin serves gRPC; the full path is paths.Join(rootDir, SocksFileName).
+	SocksFileName = "grpc.socks"
+)
diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go
new file mode 100644
index 000000000000..c36e860fae59
--- /dev/null
+++ b/snapshots/devbox/devbox.go
@@ -0,0 +1,959 @@
+//go:build linux
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package devbox
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/snapshots"
+	"github.com/containerd/containerd/snapshots/overlay/overlayutils"
+	"github.com/containerd/continuity/fs"
+	"github.com/containerd/errdefs"
+	"github.com/containerd/log"
+
+	apis "github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1"
+	"github.com/sirupsen/logrus"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/containerd/containerd/snapshots/devbox/lvm"
+	"github.com/containerd/containerd/snapshots/devbox/storage"
+)
+
+// upperdirKey is a key of an optional label to each snapshot.
+// This optional label of a snapshot contains the location of "upperdir" where
+// the change set between this snapshot and its parent is stored.
+const upperdirKey = "containerd.io/snapshot/overlay.upperdir"
+
+const newLayerLimitKey = "containerd.io/snapshot/new-layer-limit"
+const devboxContentKey = "containerd.io/snapshot/devbox-content-id"
+const removeDevboxContentKey = "containerd.io/snapshot/devbox-remove-content-id"
+
+// SnapshotterConfig is used to configure the overlay snapshotter instance
+type SnapshotterConfig struct {
+	AsyncRemove   bool
+	UpperdirLabel bool
+	ms            MetaStore
+	lvmVgName     string // modified by sealos
+	mountOptions  []string
+}
+
+// Opt is an option to configure the overlay snapshotter
+type Opt func(config *SnapshotterConfig) error
+
+// AsynchronousRemove defers removal of filesystem content until
+// the Cleanup method is called.
Removals will make the snapshot +// referred to by the key unavailable and make the key immediately +// available for re-use. +func AsynchronousRemove(config *SnapshotterConfig) error { + config.AsyncRemove = true + return nil +} + +// WithUpperdirLabel adds as an optional label +// "containerd.io/snapshot/overlay.upperdir". This stores the location +// of the upperdir that contains the changeset between the labelled +// snapshot and its parent. +func WithUpperdirLabel(config *SnapshotterConfig) error { + config.UpperdirLabel = true + return nil +} + +// modified by sealos +// WithLvmVgName sets the name of the LVM volume group to use for the overlay +func WithLvmVgName(name string) Opt { + return func(config *SnapshotterConfig) error { + config.lvmVgName = name + return nil + } +} + +// end modified by sealos + +// WithMountOptions defines the default mount options used for the overlay mount. +// NOTE: Options are not applied to bind mounts. +func WithMountOptions(options []string) Opt { + return func(config *SnapshotterConfig) error { + config.mountOptions = append(config.mountOptions, options...) + return nil + } +} + +type MetaStore interface { + TransactionContext(ctx context.Context, writable bool) (context.Context, storage.Transactor, error) + WithTransaction(ctx context.Context, writable bool, fn storage.TransactionCallback) error + Close() error +} + +// WithMetaStore allows the MetaStore to be created outside the snapshotter +// and passed in. +func WithMetaStore(ms MetaStore) Opt { + return func(config *SnapshotterConfig) error { + config.ms = ms + return nil + } +} + +type snapshotter struct { + root string + ms MetaStore + asyncRemove bool + upperdirLabel bool + lvmVgName string // modified by sealos + options []string +} + +// NewSnapshotter returns a Snapshotter which uses overlayfs. The overlayfs +// diffs are stored under the provided root. A metadata file is stored under +// the root. +func NewSnapshotter(root string, opts ...Opt) (snapshots.Snapshotter, error) { + var config SnapshotterConfig + for _, opt := range opts { + if err := opt(&config); err != nil { + return nil, err + } + } + + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + supportsDType, err := fs.SupportsDType(root) + if err != nil { + return nil, err + } + if !supportsDType { + return nil, fmt.Errorf("%s does not support d_type. 
If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root) + } + if config.ms == nil { + config.ms, err = storage.NewMetaStore(filepath.Join(root, "metadata.db")) + if err != nil { + return nil, err + } + } + + if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + if !hasOption(config.mountOptions, "userxattr", false) { + // figure out whether "userxattr" option is recognized by the kernel && needed + userxattr, err := overlayutils.NeedsUserXAttr(root) + if err != nil { + logrus.WithError(err).Warnf("cannot detect whether \"userxattr\" option needs to be used, assuming to be %v", userxattr) + } + if userxattr { + config.mountOptions = append(config.mountOptions, "userxattr") + } + } + + if !hasOption(config.mountOptions, "index", false) && supportsIndex() { + config.mountOptions = append(config.mountOptions, "index=off") + } + + return &snapshotter{ + root: root, + ms: config.ms, + asyncRemove: config.AsyncRemove, + upperdirLabel: config.UpperdirLabel, + lvmVgName: config.lvmVgName, // modified by sealos + options: config.mountOptions, + }, nil +} + +func hasOption(options []string, key string, hasValue bool) bool { + for _, option := range options { + if hasValue { + if strings.HasPrefix(option, key) && len(option) > len(key) && option[len(key)] == '=' { + return true + } + } else if option == key { + return true + } + } + return false +} + +// Stat returns the info for an active or committed snapshot by name or +// key. +// +// Should be used for parent resolution, existence checks and to discern +// the kind of snapshot. +func (o *snapshotter) Stat(ctx context.Context, key string) (info snapshots.Info, err error) { + var id string + if err := o.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + id, info, _, err = storage.GetInfo(ctx, key) + return err + }); err != nil { + return info, err + } + + if o.upperdirLabel { + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[upperdirKey] = o.upperPath(id) + } + return info, nil +} + +func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (newInfo snapshots.Info, err error) { + err = o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { + newInfo, err = storage.UpdateInfo(ctx, info, fieldpaths...) + if err != nil { + return err + } + + if o.upperdirLabel { + id, _, _, err := storage.GetInfo(ctx, newInfo.Name) + if err != nil { + return err + } + if newInfo.Labels == nil { + newInfo.Labels = make(map[string]string) + } + newInfo.Labels[upperdirKey] = o.upperPath(id) + } + return nil + }) + return newInfo, err +} + +// Usage returns the resources taken by the snapshot identified by key. +// +// For active snapshots, this will scan the usage of the overlay "diff" (aka +// "upper") directory and may take some time. +// +// For committed snapshots, the value is returned from the metadata database. +func (o *snapshotter) Usage(ctx context.Context, key string) (_ snapshots.Usage, err error) { + var ( + usage snapshots.Usage + info snapshots.Info + id string + ) + if err := o.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + id, info, usage, err = storage.GetInfo(ctx, key) + return err + }); err != nil { + return usage, err + } + + if info.Kind == snapshots.KindActive { + upperPath := o.upperPath(id) + du, err := fs.DiskUsage(ctx, upperPath) + if err != nil { + // TODO(stevvooe): Consider not reporting an error in this case. 
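+			// As written, any failure to measure the upper directory fails the
+			// whole Usage call, even though the snapshot metadata itself is intact.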
+ return snapshots.Usage{}, err + } + usage = snapshots.Usage(du) + } + return usage, nil +} + +func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + fmt.Println("Prepare called with key:", key, "parent:", parent, "opts:", opts) + return o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts) +} + +func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + return o.createSnapshot(ctx, snapshots.KindView, key, parent, opts) +} + +// Mounts returns the mounts for the transaction identified by key. Can be +// called on an read-write or readonly transaction. +// +// This can be used to recover mounts after calling View or Prepare. +func (o *snapshotter) Mounts(ctx context.Context, key string) (_ []mount.Mount, err error) { + var s storage.Snapshot + if err := o.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + s, err = storage.GetSnapshot(ctx, key) + if err != nil { + return fmt.Errorf("failed to get active mount: %w", err) + } + return nil + }); err != nil { + return nil, err + } + return o.mounts(s), nil +} + +func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { + return o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { + // grab the existing id + id, _, _, err := storage.GetInfo(ctx, key) + if err != nil { + return err + } + + usage, err := fs.DiskUsage(ctx, o.upperPath(id)) + if err != nil { + return err + } + + if _, err = storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil { + return fmt.Errorf("failed to commit snapshot %s: %w", key, err) + } + return nil + }) +} + +// Remove abandons the snapshot identified by key. The snapshot will +// immediately become unavailable and unrecoverable. Disk space will +// be freed up on the next call to `Cleanup`. +func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { + var ( + removals []string + removedLvNames []string + ) + + log.G(ctx).Infof("Remove called with key: %s", key) + // Remove directories after the transaction is closed, failures must not + // return error since the transaction is committed with the removal + // key no longer available. 
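+	// In addition to removing the snapshot directories, the deferred cleanup
+	// below unmounts any LVM-backed snapshot mounts before deleting them and
+	// destroys logical volumes that are no longer referenced in the metadata store.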
+ defer func() { + if err == nil { + for _, dir := range removals { + // modified by sealos + if err1 := o.unmountLvm(ctx, dir); err1 != nil { + log.G(ctx).WithError(err1).WithField("path", dir).Warn("failed to unmount directory") + } + // end modified by sealos + if err1 := os.RemoveAll(dir); err1 != nil { + log.G(ctx).WithError(err1).WithField("path", dir).Warn("failed to remove directory") + } + } + for _, lvName := range removedLvNames { + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: lvName, + }, + Spec: apis.VolumeInfo{ + VolGroup: o.lvmVgName, + }, + } + err := lvm.DestroyVolume(vol) + if err != nil { + log.G(ctx).WithError(err).WithField("lvName", lvName).Warn("failed to destroy LVM logical volume") + continue + } + log.G(ctx).Infof("LVM logical volume %s removed successfully", lvName) + } + } + }() + return o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { + // modified by sealos + var mountPath string + mountPath, err = storage.RemoveDevboxContent(ctx, key) + log.G(ctx).Infof("Removed devbox content for key: %s, mount path: %s", key, mountPath) + if err != nil && err != errdefs.ErrNotFound { + return fmt.Errorf("failed to remove devbox content for snapshot %s: %w", key, err) + } + if mountPath != "" { + if err := o.unmountLvm(ctx, mountPath); err != nil { + log.G(ctx).WithError(err).WithField("path", mountPath).Warn("failed to unmount directory") + } + } + _, _, err = storage.Remove(ctx, key) + if err != nil { + return fmt.Errorf("failed to remove snapshot %s: %w", key, err) + } + + if !o.asyncRemove { + removals, err = o.getCleanupDirectories(ctx) + if err != nil { + return fmt.Errorf("unable to get directories for removal: %w", err) + } + removedLvNames, err = o.getCleanupLvNames(ctx) + if err != nil { + return fmt.Errorf("failed to get LVM logical volume names for snapshot %s: %w", key, err) + } + } + return nil + }) +} + +// Walk the snapshots. +func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error { + return o.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + if o.upperdirLabel { + return storage.WalkInfo(ctx, func(ctx context.Context, info snapshots.Info) error { + id, _, _, err := storage.GetInfo(ctx, info.Name) + if err != nil { + return err + } + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[upperdirKey] = o.upperPath(id) + return fn(ctx, info) + }, fs...) + } + return storage.WalkInfo(ctx, fn, fs...) 
+	})
+}
+
+// Cleanup cleans up disk resources from removed or abandoned snapshots
+func (o *snapshotter) Cleanup(ctx context.Context) error {
+	log.G(ctx).Infof("Cleanup called")
+	cleanup, cleanupLv, err := o.cleanupDirectories(ctx)
+	if err != nil {
+		return err
+	}
+
+	for _, dir := range cleanup {
+		// modified by sealos
+		if err := o.unmountLvm(ctx, dir); err != nil {
+			log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to unmount directory")
+		}
+		// end modified by sealos
+		if err := os.RemoveAll(dir); err != nil {
+			log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory")
+		}
+	}
+
+	for _, lvName := range cleanupLv {
+		vol := &apis.LVMVolume{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: lvName,
+			},
+			Spec: apis.VolumeInfo{
+				VolGroup: o.lvmVgName,
+			},
+		}
+		err := lvm.DestroyVolume(vol)
+		if err != nil {
+			log.G(ctx).WithError(err).WithField("lvName", lvName).Warn("failed to destroy LVM logical volume")
+			continue
+		}
+		log.G(ctx).Infof("LVM logical volume %s removed successfully", lvName)
+	}
+
+	return nil
+}
+
+func (o *snapshotter) cleanupDirectories(ctx context.Context) (_ []string, _ []string, err error) {
+	var (
+		cleanupDirs    []string
+		removedLvNames []string
+	)
+	// Get a write transaction to ensure no other write transaction can be entered
+	// while the cleanup is scanning.
+	if err := o.ms.WithTransaction(ctx, true, func(ctx context.Context) error {
+		cleanupDirs, err = o.getCleanupDirectories(ctx)
+		if err != nil {
+			return err
+		}
+		removedLvNames, err = o.getCleanupLvNames(ctx)
+		return err
+	}); err != nil {
+		return nil, nil, err
+	}
+	return cleanupDirs, removedLvNames, nil
+}
+
+func (o *snapshotter) getCleanupDirectories(ctx context.Context) ([]string, error) {
+	ids, err := storage.IDMap(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	snapshotDir := filepath.Join(o.root, "snapshots")
+	fd, err := os.Open(snapshotDir)
+	if err != nil {
+		return nil, err
+	}
+	defer fd.Close()
+
+	dirs, err := fd.Readdirnames(0)
+	if err != nil {
+		return nil, err
+	}
+
+	cleanup := []string{}
+	for _, d := range dirs {
+		if _, ok := ids[d]; ok {
+			continue
+		}
+		cleanup = append(cleanup, filepath.Join(snapshotDir, d))
+	}
+
+	return cleanup, nil
+}
+
+// modified by sealos
+func (o *snapshotter) getCleanupLvNames(ctx context.Context) ([]string, error) {
+	nameMap, err := storage.GetDevboxLvNames(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// lvs := o.vgo.ListLVs()
+	lvs, err := lvm.ListLVMLogicalVolumeByVG(o.lvmVgName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list LVM logical volumes: %w", err)
+	}
+
+	cleanup := []string{}
+	for _, d := range lvs {
+		if _, ok := nameMap[d.Name]; ok {
+			continue
+		}
+		// Check if the name starts with devbox
+		if strings.HasPrefix(d.Name, "devbox") {
+			cleanup = append(cleanup, d.Name)
+		}
+	}
+
+	return cleanup, nil
+}
+
+func isMountPoint(dir string) (bool, error) {
+	// Read /proc/mounts
+	data, err := os.ReadFile("/proc/mounts")
+	if err != nil {
+		return false, err
+	}
+
+	// Check whether the directory appears in the mount list
+	mounts := strings.Split(string(data), "\n")
+	for _, mount := range mounts {
+		if len(mount) == 0 {
+			continue
+		}
+
+		fields := strings.Fields(mount)
+		if len(fields) < 2 {
+			continue
+		}
+
+		mountPoint := fields[1]
+		if mountPoint == dir {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func (o *snapshotter) mkfs(lvName string) error {
+	devicePath := fmt.Sprintf("/dev/%s/%s", o.lvmVgName, lvName)
+	// Check if the device exists
+	if _, err := os.Stat(devicePath); os.IsNotExist(err) {
+		
return fmt.Errorf("LVM logical volume %s does not exist: %w", devicePath, err) + } + + cmd := exec.Command("mkfs.ext4", devicePath) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to create filesystem on %s: %w, output: %s", devicePath, err, string(output)) + } + return nil +} + +func (o *snapshotter) mountLvm(ctx context.Context, lvName string, path string) error { + _, err := os.Stat(path) + if os.IsNotExist(err) { + if err := os.MkdirAll(path, 0755); err != nil { + return fmt.Errorf("failed to create directory %s: %w", path, err) + } + } else if err != nil { + return fmt.Errorf("failed to stat path %s: %w", path, err) + } + devicePath := fmt.Sprintf("/dev/%s/%s", o.lvmVgName, lvName) + err = syscall.Mount(devicePath, path, "ext4", 0, "") + if err != nil { + return fmt.Errorf("failed to mount LVM logical volume %s to %s: %w", devicePath, path, err) + } + return nil +} + +func (o *snapshotter) unmountLvm(ctx context.Context, path string) error { + isMounted, err := isMountPoint(path) + if err != nil { + return fmt.Errorf("failed to check if path %s is a mount point: %w", path, err) + } + if !isMounted { + log.G(ctx).Infof("Path %s is not mounted, skipping unmount", path) + return nil + } + err = syscall.Unmount(path, 0) + if err != nil { + return fmt.Errorf("failed to unmount path %s: %w", path, err) + } + return nil +} + +// end modified by sealos + +func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { + var ( + s storage.Snapshot + td, path, npath, lvName string + ) + + defer func() { + if err != nil { + if td != "" { + if err1 := o.unmountLvm(ctx, td); err1 != nil { + log.G(ctx).WithError(err1).Warn("failed to unmount temp snapshot directory") + } + if err1 := os.RemoveAll(td); err1 != nil { + log.G(ctx).WithError(err1).Warn("failed to cleanup temp snapshot directory") + } + } + if path != "" { + if err1 := o.unmountLvm(ctx, path); err1 != nil { + log.G(ctx).WithError(err1).WithField("path", path).Warn("failed to unmount snapshot directory") + } + if err1 := os.RemoveAll(path); err1 != nil { + log.G(ctx).WithError(err1).WithField("path", path).Error("failed to reclaim snapshot directory, directory may need removal") + err = fmt.Errorf("failed to remove path: %v: %w", err1, err) + } + } + } + }() + + base := snapshots.Info{} + for _, opt := range opts { + if err := opt(&base); err != nil { + return nil, fmt.Errorf("failed to apply snapshot option: %w", err) + } + } + + contentId, idOk := base.Labels[devboxContentKey] + useLimit, limitOk := base.Labels[newLayerLimitKey] + removeContentId, removeIdOk := base.Labels[removeDevboxContentKey] + + if err := o.ms.WithTransaction(ctx, true, func(ctx context.Context) (err error) { + if removeIdOk { + storage.SetDevboxContentStatusRemove(ctx, removeContentId) + } + + snapshotDir := filepath.Join(o.root, "snapshots") + + s, err = storage.CreateSnapshot(ctx, kind, key, parent, opts...) 
+ if err != nil { + return fmt.Errorf("failed to mount LVM logical volume %s: %w", lvName, err) + } + + fmt.Println("Created snapshot:", s.ID) + npath = filepath.Join(snapshotDir, s.ID) // use npath instead of path to avoid removing the directory before create + fmt.Println("Snapshot directory path:", npath) + + if idOk && limitOk { + var notExistErr error + lvName, notExistErr = storage.GetDevboxLvName(ctx, contentId) + fmt.Println("LVM logical volume name for content ID:", contentId, "is", lvName) + if notExistErr == nil && lvName != "" { + // mount point for the snapshot + fmt.Println("LVM logical volume name found for content ID:", contentId, "is", lvName) + if isMounted, err := isMountPoint(npath); err != nil { + return fmt.Errorf("failed to check if path is a mount point: %w", err) + } else if isMounted { + log.G(ctx).Infof("Path %s is already mounted, skipping mount", npath) + } else { + // mount the LVM logical volume + if err = o.mountLvm(ctx, lvName, npath); err != nil { + return fmt.Errorf("failed to mount LVM logical volume %s: %w", lvName, err) + } + } + // reuse of old lv, no need to prepare a new directory + return nil + } else if notExistErr != errdefs.ErrNotFound { + return fmt.Errorf("failed to get LVM logical volume name for key %s: %w", contentId, notExistErr) + } + + // remove devbox metadata if new lv is created + defer func() { + if err != nil { + mountPath, err := storage.RemoveDevboxContent(ctx, key) + if err != nil { + log.G(ctx).WithError(err).Warnf("failed to remove devbox content for key %s", contentId) + } + if mountPath != "" { + if err := o.unmountLvm(ctx, mountPath); err != nil { + log.G(ctx).WithError(err).WithField("path", mountPath).Warn("failed to unmount directory") + } + } + } + }() + td, lvName, err = o.prepareLvmDirectory(ctx, snapshotDir, contentId, useLimit) + if err != nil { + return fmt.Errorf("failed to prepare LVM directory for snapshot: %w", err) + } + fmt.Println("Prepared LVM directory for snapshot:", td, "with logical volume name:", lvName) + storage.SetDevboxContent(ctx, key, contentId, lvName, npath) + if err != nil { + return fmt.Errorf("failed to prepare LVM directory for snapshot: %w", err) + } + } else { + td, err = o.prepareDirectory(ctx, snapshotDir, kind) + fmt.Println("Created temporary directory for snapshot:", td) + } + + if err != nil { + return fmt.Errorf("failed to create prepare snapshot dir: %w", err) + } + + if len(s.ParentIDs) > 0 { + st, err := os.Stat(o.upperPath(s.ParentIDs[0])) + if err != nil { + return fmt.Errorf("failed to stat parent: %w", err) + } + + stat := st.Sys().(*syscall.Stat_t) + if err := os.Lchown(filepath.Join(td, "fs"), int(stat.Uid), int(stat.Gid)); err != nil { + return fmt.Errorf("failed to chown: %w", err) + } + } + + if idOk && limitOk { + err = o.unmountLvm(ctx, td) + if err != nil { + return fmt.Errorf("failed to unmount LVM logical volume %s: %w", lvName, err) + } + fmt.Println("Unmounted LVM logical volume:", lvName, "from temporary directory:", td) + if err = os.MkdirAll(npath, 0755); err != nil { + return fmt.Errorf("failed to create snapshot directory: %w", err) + } + path = npath + fmt.Println("Created snapshot directory:", path) + err = o.mountLvm(ctx, lvName, path) + if err != nil { + return fmt.Errorf("failed to mount LVM logical volume %s: %w", lvName, err) + } + fmt.Println("Mounted LVM logical volume:", lvName, "to snapshot directory:", path) + } else { + if err = os.Rename(td, npath); err != nil { + return fmt.Errorf("failed to rename: %w", err) + } + path = npath + 
fmt.Println("Renamed temporary directory to snapshot directory:", path) + } + td = "" + + return nil + }); err != nil { + return nil, err + } + + return o.mounts(s), nil +} + +func (o *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) { + td, err := os.MkdirTemp(snapshotDir, "new-") + if err != nil { + return "", fmt.Errorf("failed to create temp dir: %w", err) + } + + if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil { + return td, err + } + + if kind == snapshots.KindActive { + if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil { + return td, err + } + } + + return td, nil +} + +func parseUseLimit(useLimit string) (string, error) { + if useLimit == "" { + return "", fmt.Errorf("use limit cannot be empty") + } + multipliers := 1 + if strings.HasSuffix(useLimit, "Gi") { + multipliers = 1024 * 1024 * 1024 + useLimit = strings.TrimSuffix(useLimit, "Gi") + } else if strings.HasSuffix(useLimit, "Mi") { + multipliers = 1024 * 1024 + useLimit = strings.TrimSuffix(useLimit, "Mi") + } else if strings.HasSuffix(useLimit, "Ki") { + multipliers = 1024 + useLimit = strings.TrimSuffix(useLimit, "Ki") + } else if strings.HasSuffix(useLimit, "B") { + useLimit = strings.TrimSuffix(useLimit, "B") + } else { + return "", fmt.Errorf("invalid use limit format: %s", useLimit) + } + + fmt.Println("Parsed use limit:", useLimit, "with multipliers:", multipliers) + + capacity, err := strconv.Atoi(useLimit) + if err != nil { + return "", fmt.Errorf("failed to parse use limit %s: %w", useLimit, err) + } + if capacity <= 0 { + return "", fmt.Errorf("use limit must be greater than 0: %s", useLimit) + } + capacity *= multipliers + return strconv.Itoa(capacity), nil + +} + +func (o *snapshotter) removeLv(lvName string) error { + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: lvName, + }, + Spec: apis.VolumeInfo{ + VolGroup: o.lvmVgName, + }, + } + return lvm.DestroyVolume(vol) +} + +func (o *snapshotter) prepareLvmDirectory(ctx context.Context, snapshotDir string, contentKey string, useLimit string) (string, string, error) { + lvName := "devbox-" + contentKey + td, err := os.MkdirTemp(snapshotDir, "new-") + if err != nil { + return "", "", fmt.Errorf("failed to create temp dir: %w", err) + } + + capacity, err := parseUseLimit(useLimit) + if err != nil { + return td, "", fmt.Errorf("failed to parse use limit %s: %w", useLimit, err) + } + fmt.Println("Parsed use limit:", capacity) + + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: lvName, + }, + Spec: apis.VolumeInfo{ + Capacity: capacity, + VolGroup: o.lvmVgName, + }, + } + fmt.Println("Creating LVM volume:", lvName, "with capacity:", capacity, "in volume group:", o.lvmVgName) + err = lvm.CreateVolume(vol) + if err != nil { + return td, "", fmt.Errorf("failed to create LVM logical volume %s: %w", lvName, err) + } + + err = o.mkfs(lvName) + if err != nil { + // If mkfs fails, we should remove the LVM logical volume + if err1 := o.removeLv(lvName); err1 != nil { + log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to destroy LVM logical volume after mkfs failure") + } + return td, "", fmt.Errorf("failed to create filesystem on LVM logical volume %s: %w", lvName, err) + } + err = o.mountLvm(ctx, lvName, td) + if err != nil { + // If mount fails, we should remove the LVM logical volume + if err1 := o.removeLv(lvName); err1 != nil { + log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to destroy LVM logical volume after mount 
failure") + } + return td, "", fmt.Errorf("failed to mount LVM logical volume %s: %w", lvName, err) + } + if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil { + return td, "", fmt.Errorf("failed to create fs directory: %w", err) + } + if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil { + return td, "", fmt.Errorf("failed to create work directory: %w", err) + } + + return td, lvName, nil +} + +func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { + if len(s.ParentIDs) == 0 { + // if we only have one layer/no parents then just return a bind mount as overlay + // will not work + roFlag := "rw" + if s.Kind == snapshots.KindView { + roFlag = "ro" + } + + return []mount.Mount{ + { + Source: o.upperPath(s.ID), + Type: "bind", + Options: []string{ + roFlag, + "rbind", + }, + }, + } + } + + options := o.options + if s.Kind == snapshots.KindActive { + options = append(options, + fmt.Sprintf("workdir=%s", o.workPath(s.ID)), + fmt.Sprintf("upperdir=%s", o.upperPath(s.ID)), + ) + } else if len(s.ParentIDs) == 1 { + return []mount.Mount{ + { + Source: o.upperPath(s.ParentIDs[0]), + Type: "bind", + Options: []string{ + "ro", + "rbind", + }, + }, + } + } + + parentPaths := make([]string, len(s.ParentIDs)) + for i := range s.ParentIDs { + parentPaths[i] = o.upperPath(s.ParentIDs[i]) + } + + options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(parentPaths, ":"))) + return []mount.Mount{ + { + Type: "overlay", + Source: "overlay", + Options: options, + }, + } + +} + +func (o *snapshotter) upperPath(id string) string { + return filepath.Join(o.root, "snapshots", id, "fs") +} + +func (o *snapshotter) workPath(id string) string { + return filepath.Join(o.root, "snapshots", id, "work") +} + +// Close closes the snapshotter +func (o *snapshotter) Close() error { + return o.ms.Close() +} + +// supportsIndex checks whether the "index=off" option is supported by the kernel. +func supportsIndex() bool { + if _, err := os.Stat("/sys/module/overlay/parameters/index"); err == nil { + return true + } + return false +} diff --git a/snapshots/devbox/lvm/constants.go b/snapshots/devbox/lvm/constants.go new file mode 100644 index 000000000000..8b79e2f999aa --- /dev/null +++ b/snapshots/devbox/lvm/constants.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lvm + +// lvm vg, lv & pv fields related constants +const ( + VGName = "vg_name" + VGUUID = "vg_uuid" + VGPVvount = "pv_count" + VGLvCount = "lv_count" + VGMaxLv = "max_lv" + VGMaxPv = "max_pv" + VGSnapCount = "snap_count" + VGMissingPvCount = "vg_missing_pv_count" + VGMetadataCount = "vg_mda_count" + VGMetadataUsedCount = "vg_mda_used_count" + VGSize = "vg_size" + VGFreeSize = "vg_free" + VGMetadataSize = "vg_mda_size" + VGMetadataFreeSize = "vg_mda_free" + VGPermissions = "vg_permissions" + VGAllocationPolicy = "vg_allocation_policy" + + LVName = "lv_name" + LVFullName = "lv_full_name" + LVUUID = "lv_uuid" + LVPath = "lv_path" + LVDmPath = "lv_dm_path" + LVActive = "lv_active" + LVSize = "lv_size" + LVMetadataSize = "lv_metadata_size" + LVSegtype = "segtype" + LVHost = "lv_host" + LVPool = "pool_lv" + LVPermissions = "lv_permissions" + LVWhenFull = "lv_when_full" + LVHealthStatus = "lv_health_status" + RaidSyncAction = "raid_sync_action" + LVDataPercent = "data_percent" + LVMetadataPercent = "metadata_percent" + LVSnapPercent = "snap_percent" + + PVName = "pv_name" + PVUUID = "pv_uuid" + PVInUse = "pv_in_use" + PVAllocatable = "pv_allocatable" + PVMissing = "pv_missing" + PVSize = "pv_size" + PVFreeSize = "pv_free" + PVUsedSize = "pv_used" + PVMetadataSize = "pv_mda_size" + PVMetadataFreeSize = "pv_mda_free" + PVDeviceSize = "dev_size" +) diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go new file mode 100644 index 000000000000..e1a2726efd4c --- /dev/null +++ b/snapshots/devbox/lvm/lvm.go @@ -0,0 +1,1124 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lvm + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog/v2" + + apis "github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1" +) + +// lvm related constants +const ( + DevPath = "/dev/" + DevMapperPath = "/dev/mapper/" + // MinExtentRoundOffSize represents minimum size (256Mi) to roundoff the volume + // group size in case of thin pool provisioning + MinExtentRoundOffSize = 268435456 + + // BlockCleanerCommand is the command used to clean filesystem on the device + BlockCleanerCommand = "wipefs" +) + +// lvm command related constants +const ( + VGCreate = "vgcreate" + VGList = "vgs" + + LVCreate = "lvcreate" + LVRemove = "lvremove" + LVExtend = "lvextend" + LVList = "lvs" + + PVList = "pvs" + PVScan = "pvscan" + + YES = "yes" + LVThinPool = "thin-pool" +) + +var ( + Enums = map[string][]string{ + "lv_permissions": {"unknown", "writeable", "read-only", "read-only-override"}, + "lv_when_full": {"error", "queue"}, + "raid_sync_action": {"idle", "frozen", "resync", "recover", "check", "repair"}, + "lv_health_status": {"", "partial", "refresh needed", "mismatches exist"}, + "vg_allocation_policy": {"normal", "contiguous", "cling", "anywhere", "inherited"}, + "vg_permissions": {"writeable", "read-only"}, + } +) + +// LogicalVolume specifies attributes of a given lv that exists on the node. +type LogicalVolume struct { + + // Name of the lvm logical volume(name: pvc-213ca1e6-e271-4ec8-875c-c7def3a4908d) + Name string + + // Full name of the lvm logical volume (fullName: linuxlvmvg/pvc-213ca1e6-e271-4ec8-875c-c7def3a4908d) + FullName string + + // UUID denotes a unique identity of a lvm logical volume. + UUID string + + // Size specifies the total size of logical volume in Bytes + Size int64 + + // Path specifies LVM logical volume path + Path string + + // DMPath specifies device mapper path + DMPath string + + // LVM logical volume device + Device string + + // Name of the VG in which LVM logical volume is created + VGName string + + // SegType specifies the type of Logical volume segment + SegType string + + // Permission indicates the logical volume permission. + // Permission has the following mapping between + // int and string for its value: + // [-1: "", 0: "unknown", 1: "writeable", 2: "read-only", 3: "read-only-override"] + Permission int + + // BehaviourWhenFull indicates the behaviour of thin pools when it is full. + // BehaviourWhenFull has the following mapping between int and string for its value: + // [-1: "", 0: "error", 1: "queue"] + BehaviourWhenFull int + + // HealthStatus indicates the health status of logical volumes. + // HealthStatus has the following mapping between int and string for its value: + // [0: "", 1: "partial", 2: "refresh needed", 3: "mismatches exist"] + HealthStatus int + + // RaidSyncAction indicates the current synchronization action being performed for RAID + // action. 
+ // RaidSyncAction has the following mapping between int and string for its value: + // [-1: "", 0: "idle", 1: "frozen", 2: "resync", 3: "recover", 4: "check", 5: "repair"] + RaidSyncAction int + + // ActiveStatus indicates the active state of logical volume + ActiveStatus string + + // Host specifies the creation host of the logical volume, if known + Host string + + // For thin volumes, the thin pool Logical volume for that volume + PoolName string + + // UsedSizePercent specifies the percentage full for snapshot, cache + // and thin pools and volumes if logical volume is active. + UsedSizePercent float64 + + // MetadataSize specifies the size of the logical volume that holds + // the metadata for thin and cache pools. + MetadataSize int64 + + // MetadataUsedPercent specifies the percentage of metadata full if logical volume + // is active for cache and thin pools. + MetadataUsedPercent float64 + + // SnapshotUsedPercent specifies the percentage full for snapshots if + // logical volume is active. + SnapshotUsedPercent float64 +} + +// PhysicalVolume specifies attributes of a given pv that exists on the node. +type PhysicalVolume struct { + // Name of the lvm physical volume. + Name string + + // UUID denotes a unique identity of a lvm physical volume. + UUID string + + // Size specifies the total size of physical volume in bytes + Size resource.Quantity + + // DeviceSize specifies the size of underlying device in bytes + DeviceSize resource.Quantity + + // MetadataSize specifies the size of smallest metadata area on this device in bytes + MetadataSize resource.Quantity + + // MetadataFree specifies the free metadata area space on the device in bytes + MetadataFree resource.Quantity + + // Free specifies the physical volume unallocated space in bytes + Free resource.Quantity + + // Used specifies the physical volume allocated space in bytes + Used resource.Quantity + + // Allocatable indicates whether the device can be used for allocation + Allocatable string + + // Missing indicates whether the device is missing in the system + Missing string + + // InUse indicates whether or not the physical volume is in use + InUse string + + // Name of the volume group which uses this physical volume + VGName string +} + +// ExecError holds the process output along with underlying +// error returned by exec.CombinedOutput function. +type ExecError struct { + Output []byte + Err error +} + +// Error implements the error interface. 
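+// The returned message combines the captured process output with the wrapped error.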
+func (e *ExecError) Error() string { + return fmt.Sprintf("%v - %v", string(e.Output), e.Err) +} + +func NewExecError(output []byte, err error) error { + if err == nil { + return nil + } + return &ExecError{ + Output: output, + Err: err, + } +} + +// builldLVMCreateArgs returns lvcreate command for the volume +func buildLVMCreateArgs(vol *apis.LVMVolume) []string { + var LVMVolArg []string + + volume := vol.Name + size := vol.Spec.Capacity + "b" + // thinpool name required for thinProvision volumes + pool := vol.Spec.VolGroup + "_thinpool" + + if len(vol.Spec.Capacity) != 0 { + // check if thin pool exists for given volumegroup requested thin volume + if strings.TrimSpace(vol.Spec.ThinProvision) != YES { + LVMVolArg = append(LVMVolArg, "-L", size) + } else if !lvThinExists(vol.Spec.VolGroup, pool) { + // thinpool size can't be equal or greater than actual volumegroup size + LVMVolArg = append(LVMVolArg, "-L", getThinPoolSize(vol.Spec.VolGroup, vol.Spec.Capacity)) + } + } + + // command to create thinpool and thin volume if thinProvision is enabled + // `lvcreate -L 1G -T lvmvg/mythinpool -V 1G -n thinvol` + if strings.TrimSpace(vol.Spec.ThinProvision) == YES { + LVMVolArg = append(LVMVolArg, "-T", vol.Spec.VolGroup+"/"+pool, "-V", size) + } + + if len(vol.Spec.VolGroup) != 0 { + LVMVolArg = append(LVMVolArg, "-n", volume) + } + + if strings.TrimSpace(vol.Spec.ThinProvision) != YES { + LVMVolArg = append(LVMVolArg, vol.Spec.VolGroup) + } + + // -y is used to wipe the signatures before creating LVM volume + LVMVolArg = append(LVMVolArg, "-y") + return LVMVolArg +} + +// builldLVMDestroyArgs returns lvmremove command for the volume +func buildLVMDestroyArgs(vol *apis.LVMVolume) []string { + var LVMVolArg []string + + dev := DevPath + vol.Spec.VolGroup + "/" + vol.Name + + LVMVolArg = append(LVMVolArg, "-y", dev) + + return LVMVolArg +} + +// RunCommandSplit is a wrapper function to run a command and receive its +// STDERR and STDOUT streams in separate []byte vars. +func RunCommandSplit(command string, args ...string) ([]byte, []byte, error) { + var cmdStdout bytes.Buffer + var cmdStderr bytes.Buffer + + cmd := exec.Command(command, args...) + cmd.Stdout = &cmdStdout + cmd.Stderr = &cmdStderr + err := cmd.Run() + + output := cmdStdout.Bytes() + error_output := cmdStderr.Bytes() + + if len(error_output) > 0 { + klog.Warningf("lvm: said into stderr: %s", error_output) + } + + return output, error_output, err +} + +// CreateVolume creates the lvm volume +func CreateVolume(vol *apis.LVMVolume) error { + volume := vol.Spec.VolGroup + "/" + vol.Name + + volExists, err := CheckVolumeExists(vol) + if err != nil { + return err + } + if volExists { + klog.Infof("lvm: volume (%s) already exists, skipping its creation", volume) + return nil + } + + args := buildLVMCreateArgs(vol) + out, _, err := RunCommandSplit(LVCreate, args...) 
+ + if err != nil { + err = NewExecError(out, err) + klog.Errorf( + "lvm: could not create volume %v cmd %v error: %s", volume, args, string(out), + ) + return err + } + klog.Infof("lvm: created volume %s", volume) + + return nil +} + +// DestroyVolume deletes the lvm volume +func DestroyVolume(vol *apis.LVMVolume) error { + if vol.Spec.VolGroup == "" { + klog.Infof("volGroup not set for lvm volume %v, skipping its deletion", vol.Name) + return nil + } + + volume := vol.Spec.VolGroup + "/" + vol.Name + + volExists, err := CheckVolumeExists(vol) + if err != nil { + return err + } + if !volExists { + klog.Infof("lvm: volume (%s) doesn't exists, skipping its deletion", volume) + return nil + } + + err = removeVolumeFilesystem(vol) + if err != nil { + return err + } + + args := buildLVMDestroyArgs(vol) + out, _, err := RunCommandSplit(LVRemove, args...) + + if err != nil { + klog.Errorf( + "lvm: could not destroy volume %v cmd %v error: %s", volume, args, string(out), + ) + return err + } + + klog.Infof("lvm: destroyed volume %s", volume) + + return nil +} + +// CheckVolumeExists validates if lvm volume exists +func CheckVolumeExists(vol *apis.LVMVolume) (bool, error) { + devPath, err := GetVolumeDevPath(vol) + if err != nil { + return false, err + } + if _, err = os.Stat(devPath); err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + return true, nil +} + +// GetVolumeDevPath returns devpath for the given volume +func GetVolumeDevPath(vol *apis.LVMVolume) (string, error) { + // LVM doubles the hiphen for the mapper device name + // and uses single hiphen to separate volume group from volume + vg := strings.Replace(vol.Spec.VolGroup, "-", "--", -1) + + lv := strings.Replace(vol.Name, "-", "--", -1) + dev := DevMapperPath + vg + "-" + lv + + return dev, nil +} + +// builldVolumeResizeArgs returns resize command for the lvm volume +func buildVolumeResizeArgs(vol *apis.LVMVolume, resizefs bool) []string { + var LVMVolArg []string + + dev := DevPath + vol.Spec.VolGroup + "/" + vol.Name + size := vol.Spec.Capacity + "b" + + LVMVolArg = append(LVMVolArg, dev, "-L", size) + + if resizefs { + LVMVolArg = append(LVMVolArg, "-r") + } + + return LVMVolArg +} + +// ResizeLVMVolume resizes the underlying LVM volume and FS if resizefs +// is set to true +// Note: +// 1. Triggering `lvextend -L -r` multiple times with +// same size will not return any errors +// 2. Triggering `lvextend -L ` more than one time will +// cause errors +func ResizeLVMVolume(vol *apis.LVMVolume, resizefs bool) error { + + // In case if resizefs is not enabled then check current size + // before exapnding LVM volume(If volume is already expanded then + // it might be error prone). This also makes ResizeLVMVolume func + // idempotent + if !resizefs { + desiredVolSize, err := strconv.ParseUint(vol.Spec.Capacity, 10, 64) + if err != nil { + return err + } + + curVolSize, err := getLVSize(vol) + if err != nil { + return err + } + + // Trigger resize only when desired volume size is greater than + // current volume size else return + if desiredVolSize <= curVolSize { + return nil + } + } + + volume := vol.Spec.VolGroup + "/" + vol.Name + + args := buildVolumeResizeArgs(vol, resizefs) + out, _, err := RunCommandSplit(LVExtend, args...) 
+ + if err != nil { + klog.Errorf( + "lvm: could not resize the volume %v cmd %v error: %s", volume, args, string(out), + ) + } + + return err +} + +// getLVSize will return current LVM volume size in bytes +func getLVSize(vol *apis.LVMVolume) (uint64, error) { + lvmVolumeName := vol.Spec.VolGroup + "/" + vol.Name + + args := []string{ + lvmVolumeName, + "--noheadings", + "-o", "lv_size", + "--units", "b", + "--nosuffix", + } + + raw, _, err := RunCommandSplit(LVList, args...) + if err != nil { + return 0, errors.Wrapf( + err, + "could not get size of volume %v output: %s", + lvmVolumeName, + string(raw), + ) + } + + volSize, err := strconv.ParseUint(strings.TrimSpace(string(raw)), 10, 64) + if err != nil { + return 0, err + } + + return volSize, nil +} + +const LVMVolKey string = "openebs.io/persistent-volume" + +func buildLVMSnapCreateArgs(snap *apis.LVMSnapshot) []string { + var LVMSnapArg []string + + volName := snap.Labels[LVMVolKey] + volPath := DevPath + snap.Spec.VolGroup + "/" + volName + size := snap.Spec.SnapSize + "b" + + LVMSnapArg = append(LVMSnapArg, + // snapshot argument + "--snapshot", + // name of snapshot + "--name", getLVMSnapName(snap.Name), + // set the permission to make the snapshot read-only. By default LVM snapshots are RW + "--permission", "r", + // volume to snapshot + volPath, + ) + + // When creating a thin snapshot volume, you do not specify the size of the volume. + // If you specify a size parameter, the snapshot that will be created will not + // be a thin snapshot volume and will not use the thin pool for storing data. + if len(snap.Spec.SnapSize) != 0 { + // size of the snapshot, will be same or less than source volume + LVMSnapArg = append(LVMSnapArg, "--size", size) + } + return LVMSnapArg +} + +func buildLVMSnapDestroyArgs(snap *apis.LVMSnapshot) []string { + var LVMSnapArg []string + + dev := DevPath + snap.Spec.VolGroup + "/" + getLVMSnapName(snap.Name) + + LVMSnapArg = append(LVMSnapArg, "-y", dev) + + return LVMSnapArg +} + +// CreateSnapshot creates the lvm volume snapshot +func CreateSnapshot(snap *apis.LVMSnapshot) error { + + volume := snap.Labels[LVMVolKey] + + snapVolume := snap.Spec.VolGroup + "/" + getLVMSnapName(snap.Name) + + args := buildLVMSnapCreateArgs(snap) + out, _, err := RunCommandSplit(LVCreate, args...) + + if err != nil { + klog.Errorf("lvm: could not create snapshot %s cmd %v error: %s", snapVolume, args, string(out)) + return err + } + + klog.Infof("created snapshot %s from %s", snapVolume, volume) + return nil + +} + +// DestroySnapshot deletes the lvm volume snapshot +func DestroySnapshot(snap *apis.LVMSnapshot) error { + snapVolume := snap.Spec.VolGroup + "/" + getLVMSnapName(snap.Name) + + ok, err := isSnapshotExists(snap.Spec.VolGroup, getLVMSnapName(snap.Name)) + if !ok { + klog.Infof("lvm: snapshot %s does not exist, skipping deletion", snapVolume) + return nil + } + + if err != nil { + klog.Errorf("lvm: error checking for snapshot %s, error: %v", snapVolume, err) + return err + } + + args := buildLVMSnapDestroyArgs(snap) + out, _, err := RunCommandSplit(LVRemove, args...) + + if err != nil { + klog.Errorf("lvm: could not remove snapshot %s cmd %v error: %s", snapVolume, args, string(out)) + return err + } + + klog.Infof("removed snapshot %s", snapVolume) + return nil + +} + +// getSnapName is used to remove the snapshot prefix from the snapname. 
since names starting +// with "snapshot" are reserved in lvm2 +func getLVMSnapName(snapName string) string { + return strings.TrimPrefix(snapName, "snapshot-") +} + +func decodeVgsJSON(raw []byte) ([]apis.VolumeGroup, error) { + output := &struct { + Report []struct { + VolumeGroups []map[string]string `json:"vg"` + } `json:"report"` + }{} + var err error + if err = json.Unmarshal(raw, output); err != nil { + klog.Errorf("json: failed to unmarshal:\n%s", raw) + return nil, err + } + + if len(output.Report) != 1 { + return nil, fmt.Errorf("expected exactly one lvm report") + } + + items := output.Report[0].VolumeGroups + vgs := make([]apis.VolumeGroup, 0, len(items)) + for _, item := range items { + var vg apis.VolumeGroup + if vg, err = parseVolumeGroup(item); err != nil { + return vgs, err + } + vgs = append(vgs, vg) + } + return vgs, nil +} + +func parseVolumeGroup(m map[string]string) (apis.VolumeGroup, error) { + var vg apis.VolumeGroup + var count int + var sizeBytes int64 + var err error + + vg.Name = m[VGName] + vg.UUID = m[VGUUID] + + int32Map := map[string]*int32{ + VGPVvount: &vg.PVCount, + VGLvCount: &vg.LVCount, + VGMaxLv: &vg.MaxLV, + VGMaxPv: &vg.MaxPV, + VGSnapCount: &vg.SnapCount, + VGMissingPvCount: &vg.MissingPVCount, + VGMetadataCount: &vg.MetadataCount, + VGMetadataUsedCount: &vg.MetadataUsedCount, + } + for key, value := range int32Map { + count, err = strconv.Atoi(m[key]) + if err != nil { + err = fmt.Errorf("invalid format of %v=%v for vg %v: %v", key, m[key], vg.Name, err) + } + *value = int32(count) + } + + resQuantityMap := map[string]*resource.Quantity{ + VGSize: &vg.Size, + VGFreeSize: &vg.Free, + VGMetadataSize: &vg.MetadataSize, + VGMetadataFreeSize: &vg.MetadataFree, + } + + for key, value := range resQuantityMap { + sizeBytes, err = strconv.ParseInt( + strings.TrimSuffix(strings.ToLower(m[key]), "b"), + 10, 64) + if err != nil { + err = fmt.Errorf("invalid format of %v=%v for vg %v: %v", key, m[key], vg.Name, err) + } + quantity := resource.NewQuantity(sizeBytes, resource.BinarySI) + *value = *quantity // + } + + vg.Permission = getIntFieldValue(VGPermissions, m[VGPermissions]) + vg.AllocationPolicy = getIntFieldValue(VGAllocationPolicy, m[VGAllocationPolicy]) + + return vg, err +} + +// This function returns the integer equivalent for different string values for the LVM component(vg,lv) field. +// -1 represents undefined. +func getIntFieldValue(fieldName, fieldValue string) int { + mv := -1 + for i, v := range Enums[fieldName] { + if v == fieldValue { + mv = i + break + } + } + return mv +} + +// ReloadLVMMetadataCache refreshes lvmetad daemon cache used for +// serving vgs or other lvm utility. +func ReloadLVMMetadataCache() error { + args := []string{"--cache"} + output, _, err := RunCommandSplit(PVScan, args...) + if err != nil { + klog.Errorf("lvm: reload lvm metadata cache: %v - %v", string(output), err) + return err + } + + return nil +} + +// ListLVMVolumeGroup invokes `vgs` to list all the available volume +// groups in the node. +// +// In case reloadCache is false, we skip refreshing lvm metadata cache. +func ListLVMVolumeGroup(reloadCache bool) ([]apis.VolumeGroup, error) { + if reloadCache { + if err := ReloadLVMMetadataCache(); err != nil { + return nil, err + } + } + + args := []string{ + "--options", "vg_all", + "--reportformat", "json", + "--units", "b", + } + output, _, err := RunCommandSplit(VGList, args...) 
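+	// Assuming VGList is the stock vgs binary, the invocation above is
+	// roughly:
+	//   vgs --options vg_all --reportformat json --units b
+	// i.e. all vg_* fields, reported in bytes, as a single JSON document
+	// that decodeVgsJSON/parseVolumeGroup (above) turn into
+	// apis.VolumeGroup values.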
+ if err != nil { + klog.Errorf("lvm: list volume group cmd %v: %v", args, err) + return nil, err + } + + return decodeVgsJSON(output) +} + +// Function to get LVM Logical volume device +// It returns LVM logical volume device(dm-*). +// This is used as a label in metrics(lvm_lv_total_size) which helps us to map lv_name to device. +// +// Example: pvc-f147582c-adbd-4015-8ca9-fe3e0a4c2452(lv_name) -> dm-0(device) +func getLvDeviceName(path string) (string, error) { + dmPath, err := filepath.EvalSymlinks(path) + if err != nil { + klog.Errorf("failed to resolve device mapper from lv path %v: %v", path, err) + return "", err + } + deviceName := strings.Split(dmPath, "/") + return deviceName[len(deviceName)-1], nil +} + +// To parse the output of lvs command and store it in LogicalVolume +// It returns LogicalVolume. +// +// Example: LogicalVolume{ +// Name: "pvc-082c7975-9af2-4a50-9d24-762612b35f94", +// FullName: "vg_thin/pvc-082c7975-9af2-4a50-9d24-762612b35f94" +// UUID: "FBqcEe-Ln72-SmWO-fR4j-t4Ga-1Y90-0vieKW" +// Size: 4294967296, +// Path: "/dev/vg_thin/pvc-082c7975-9af2-4a50-9d24-762612b35f94", +// DMPath: "/dev/mapper/vg_thin-pvc--082c7975--9af2--4a50--9d24--762612b35f94" +// Device: "dm-5" +// VGName: "vg_thin" +// SegType: "thin" +// Permission: 1 +// BehaviourWhenFull: -1 +// HealthStatus: 0 +// RaidSyncAction: -1 +// ActiveStatus: "active" +// Host: "node1-virtual-machine" +// PoolName: "vg_thin_thinpool" +// UsedSizePercent: 0 +// MetadataSize: 0 +// MetadataUsedPercent: 0 +// SnapshotUsedPercent: 0 +// } +func parseLogicalVolume(m map[string]string) (LogicalVolume, error) { + var lv LogicalVolume + var err error + var sizeBytes int64 + var count float64 + + lv.Name = m[LVName] + lv.FullName = m[LVFullName] + lv.UUID = m[LVUUID] + lv.Path = m[LVPath] + lv.DMPath = m[LVDmPath] + lv.VGName = m[VGName] + lv.ActiveStatus = m[LVActive] + + int64Map := map[string]*int64{ + LVSize: &lv.Size, + LVMetadataSize: &lv.MetadataSize, + } + for key, value := range int64Map { + // Check if the current LV is not a thin pool. If not then + // metadata size will not be present as metadata is only + // stored for thin pools. 
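+		// For such LVs we record 0 bytes rather than trying to parse the
+		// (typically empty) metadata size field.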
+ if m[LVSegtype] != LVThinPool && key == LVMetadataSize { + sizeBytes = 0 + } else { + sizeBytes, err = strconv.ParseInt(strings.TrimSuffix(strings.ToLower(m[key]), "b"), 10, 64) + if err != nil { + err = fmt.Errorf("invalid format of %v=%v for vg %v: %v", key, m[key], lv.Name, err) + return lv, err + } + } + *value = sizeBytes + } + + lv.SegType = m[LVSegtype] + lv.Host = m[LVHost] + lv.PoolName = m[LVPool] + lv.Permission = getIntFieldValue(LVPermissions, m[LVPermissions]) + lv.BehaviourWhenFull = getIntFieldValue(LVWhenFull, m[LVWhenFull]) + lv.HealthStatus = getIntFieldValue(LVHealthStatus, m[LVHealthStatus]) + lv.RaidSyncAction = getIntFieldValue(RaidSyncAction, m[RaidSyncAction]) + + float64Map := map[string]*float64{ + LVDataPercent: &lv.UsedSizePercent, + LVMetadataPercent: &lv.MetadataUsedPercent, + LVSnapPercent: &lv.SnapshotUsedPercent, + } + for key, value := range float64Map { + if m[key] == "" { + count = 0 + } else { + count, err = strconv.ParseFloat(m[key], 64) + if err != nil { + err = fmt.Errorf("invalid format of %v=%v for lv %v: %v", key, m[key], lv.Name, err) + return lv, err + } + } + *value = count + } + + return lv, err +} + +// decodeLvsJSON([]bytes): Decode json format and pass the unmarshalled json to parseLogicalVolume to store logical volumes in LogicalVolume +// +// Output of lvs command will be in json format: +// +// { +// "report": [ +// { +// "lv": [ +// { +// "lv_name":"pvc-082c7975-9af2-4a50-9d24-762612b35f94", +// ... +// } +// ] +// } +// ] +// } +// +// This function is used to decode the output of lvs command. +// It returns []LogicalVolume. +// +// Example: []LogicalVolume{ +// { +// Name: "pvc-082c7975-9af2-4a50-9d24-762612b35f94", +// FullName: "vg_thin/pvc-082c7975-9af2-4a50-9d24-762612b35f94" +// UUID: "FBqcEe-Ln72-SmWO-fR4j-t4Ga-1Y90-0vieKW" +// Size: 4294967296, +// Path: "/dev/vg_thin/pvc-082c7975-9af2-4a50-9d24-762612b35f94", +// DMPath: "/dev/mapper/vg_thin-pvc--082c7975--9af2--4a50--9d24--762612b35f94" +// Device: "dm-5" +// VGName: "vg_thin" +// SegType: "thin" +// Permission: 1 +// BehaviourWhenFull: -1 +// HealthStatus: 0 +// RaidSyncAction: -1 +// ActiveStatus: "active" +// Host: "node1-virtual-machine" +// PoolName: "vg_thin_thinpool" +// UsedSizePercent: 0 +// MetadataSize: 0 +// MetadataUsedPercent: 0 +// SnapshotUsedPercent: 0 +// } +// } +func decodeLvsJSON(raw []byte) ([]LogicalVolume, error) { + output := &struct { + Report []struct { + LogicalVolumes []map[string]string `json:"lv"` + } `json:"report"` + }{} + var err error + if err = json.Unmarshal(raw, output); err != nil { + return nil, err + } + + if len(output.Report) != 1 { + return nil, fmt.Errorf("expected exactly one lvm report") + } + + items := output.Report[0].LogicalVolumes + lvs := make([]LogicalVolume, 0, len(items)) + for _, item := range items { + var lv LogicalVolume + if lv, err = parseLogicalVolume(item); err != nil { + return lvs, err + } + deviceName, err := getLvDeviceName(lv.Path) + if err != nil { + klog.Error(err) + return nil, err + } + lv.Device = deviceName + lvs = append(lvs, lv) + } + return lvs, nil +} + +func ListLVMLogicalVolume() ([]LogicalVolume, error) { + args := []string{ + "--options", "lv_all,vg_name,segtype", + "--reportformat", "json", + "--units", "b", + } + output, _, err := RunCommandSplit(LVList, args...) 
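+	// Assuming LVList is the stock lvs binary, this is roughly equivalent
+	// to running:
+	//   lvs --options lv_all,vg_name,segtype --reportformat json --units b
+	// whose JSON report is decoded by decodeLvsJSON above.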
+ if err != nil { + klog.Errorf("lvm: error while running command %s %v: %v", LVList, args, err) + return nil, err + } + + return decodeLvsJSON(output) +} + +// modified by sealos +func ListLVMLogicalVolumeByVG(vg string) ([]LogicalVolume, error) { + if err := ReloadLVMMetadataCache(); err != nil { + return nil, err + } + + args := []string{ + "--options", "lv_all,vg_name,segtype", + "--reportformat", "json", + "--units", "b", + "--select", fmt.Sprintf("vg_name=%s", vg), + } + output, _, err := RunCommandSplit(LVList, args...) + if err != nil { + klog.Errorf("lvm: error while running command %s %v: %v", LVList, args, err) + return nil, err + } + + return decodeLvsJSON(output) +} + +// end modified by sealos + +/* +ListLVMPhysicalVolume invokes `pvs` to list all the available LVM physical volumes in the node. +*/ +func ListLVMPhysicalVolume() ([]PhysicalVolume, error) { + if err := ReloadLVMMetadataCache(); err != nil { + return nil, err + } + + args := []string{ + "--options", "pv_all,vg_name", + "--reportformat", "json", + "--units", "b", + } + output, _, err := RunCommandSplit(PVList, args...) + if err != nil { + klog.Errorf("lvm: error while running command %s %v: %v", PVList, args, err) + return nil, err + } + + return decodePvsJSON(output) +} + +// To parse the output of pvs command and store it in PhysicalVolume +// It returns PhysicalVolume. +// +// Example: PhysicalVolume{ +// Name: "/dev/sdc", +// UUID: "UAdQl0-dK00-gM1V-6Vda-zYeu-XUdQ-izs8KW" +// Size: 21441282048 +// Used: 8657043456 +// Free: 12784238592 +// MetadataSize: 1044480 +// MetadataFree: 518656 +// DeviceSize: 21474836480 +// Allocatable: "allocatable" +// InUse: "used" +// Missing: "" +// VGName: "vg_thin" +// } +func parsePhysicalVolume(m map[string]string) (PhysicalVolume, error) { + var pv PhysicalVolume + var err error + var sizeBytes int64 + + pv.Name = m[PVName] + pv.UUID = m[PVUUID] + pv.InUse = m[PVInUse] + pv.Allocatable = m[PVAllocatable] + pv.Missing = m[PVMissing] + pv.VGName = m[VGName] + + resQuantityMap := map[string]*resource.Quantity{ + PVSize: &pv.Size, + PVFreeSize: &pv.Free, + PVUsedSize: &pv.Used, + PVMetadataSize: &pv.MetadataSize, + PVMetadataFreeSize: &pv.MetadataFree, + PVDeviceSize: &pv.DeviceSize, + } + + for key, value := range resQuantityMap { + sizeBytes, err = strconv.ParseInt( + strings.TrimSuffix(strings.ToLower(m[key]), "b"), + 10, 64) + if err != nil { + err = fmt.Errorf("invalid format of %v=%v for pv %v: %v", key, m[key], pv.Name, err) + return pv, err + } + quantity := resource.NewQuantity(sizeBytes, resource.BinarySI) + *value = *quantity + } + + return pv, err +} + +// decodeLvsJSON([]bytes): Decode json format and pass the unmarshalled json to parsePhysicalVolume to store physical volumes in PhysicalVolume +// +// Output of pvs command will be in json format: +// +// { +// "report": [ +// { +// "pv": [ +// { +// "pv_name":"/dev/sdc", +// ... +// } +// ] +// } +// ] +// } +// +// This function is used to decode the output of pvs command. +// It returns []PhysicalVolume. 
+// +// Example: []PhysicalVolume{ +// { +// Name: "/dev/sdc", +// UUID: "UAdQl0-dK00-gM1V-6Vda-zYeu-XUdQ-izs8KW" +// Size: 21441282048 +// Used: 8657043456 +// Free: 12784238592 +// MetadataSize: 1044480 +// MetadataFree: 518656 +// DeviceSize: 21474836480 +// Allocatable: "allocatable" +// InUse: "used" +// Missing: "" +// VGName: "vg_thin" +// } +// } +func decodePvsJSON(raw []byte) ([]PhysicalVolume, error) { + output := &struct { + Report []struct { + PhysicalVolume []map[string]string `json:"pv"` + } `json:"report"` + }{} + var err error + if err = json.Unmarshal(raw, output); err != nil { + return nil, err + } + + if len(output.Report) != 1 { + return nil, fmt.Errorf("expected exactly one lvm report") + } + + items := output.Report[0].PhysicalVolume + pvs := make([]PhysicalVolume, 0, len(items)) + for _, item := range items { + var pv PhysicalVolume + if pv, err = parsePhysicalVolume(item); err != nil { + return pvs, err + } + pvs = append(pvs, pv) + } + return pvs, nil +} + +// lvThinExists verifies if thin pool/volume already exists for given volumegroup +func lvThinExists(vg string, name string) bool { + out, _, err := RunCommandSplit("lvs", vg+"/"+name, "--noheadings", "-o", "lv_name") + if err != nil { + klog.Errorf("failed to list existing volumes:%v", err) + return false + } + + return name == strings.TrimSpace(string(out)) +} + +// snapshotExists checks if a snapshot volume exists for the given volumegroup +// and snapshot name. +func isSnapshotExists(vg, snapVolumeName string) (bool, error) { + out, _, err := RunCommandSplit("lvs", vg+"/"+snapVolumeName, "--noheadings", "-o", "lv_name") + if err != nil { + return false, err + } + return snapVolumeName == strings.TrimSpace(string(out)), nil +} + +// getVGSize get the size in bytes for given volumegroup name +func getVGSize(vgname string) string { + out, _, err := RunCommandSplit("vgs", vgname, "--noheadings", "-o", "vg_free", "--units", "b", "--nosuffix") + if err != nil { + klog.Errorf("failed to list existing volumegroup:%v , %v", vgname, err) + return "" + } + return strings.TrimSpace(string(out)) +} + +// getThinPoolSize gets size for a given volumegroup, compares it with +// the requested volume size and returns the minimum size as a thin pool size +func getThinPoolSize(vgname, volsize string) string { + outStr := getVGSize(vgname) + vgFreeSize, err := strconv.ParseInt(strings.TrimSpace(string(outStr)), 10, 64) + if err != nil { + klog.Errorf("failed to convert vg_size to int, got size,:%v , %v", outStr, err) + return "" + } + + volSize, err := strconv.ParseInt(strings.TrimSpace(string(volsize)), 10, 64) + if err != nil { + klog.Errorf("failed to convert volsize to int, got size,:%v , %v", volSize, err) + return "" + } + + if vgFreeSize < volSize { + // reducing 268435456 bytes (256Mi) from the total byte size to round off + // blocks extent + return fmt.Sprint(vgFreeSize-MinExtentRoundOffSize) + "b" + } + return volsize + "b" +} + +// removeVolumeFilesystem will erases the filesystem signature from lvm volume +func removeVolumeFilesystem(lvmVolume *apis.LVMVolume) error { + devicePath := filepath.Join(DevPath, lvmVolume.Spec.VolGroup, lvmVolume.Name) + + // wipefs erases the filesystem signature from the lvm volume + // -a wipe all magic strings + // -f force erasure + // Command: wipefs -af /dev/lvmvg/volume1 + cleanCommand := exec.Command(BlockCleanerCommand, "-af", devicePath) + output, err := cleanCommand.CombinedOutput() + if err != nil { + return errors.Wrapf( + err, + "failed to wipe filesystem on device path: 
%s resp: %s", + devicePath, + string(output), + ) + } + klog.V(4).Infof("Successfully wiped filesystem on device path: %s", devicePath) + return nil +} diff --git a/snapshots/devbox/plugin/plugin.go b/snapshots/devbox/plugin/plugin.go new file mode 100644 index 000000000000..a9d56a39e078 --- /dev/null +++ b/snapshots/devbox/plugin/plugin.go @@ -0,0 +1,82 @@ +//go:build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package overlay + +import ( + "errors" + + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/snapshots/devbox" + "github.com/containerd/platforms" +) + +// Config represents configuration for the overlay plugin. +type Config struct { + // Root directory for the plugin + RootPath string `toml:"root_path"` + UpperdirLabel bool `toml:"upperdir_label"` + SyncRemove bool `toml:"sync_remove"` + lvmVgName string `toml:"lvm_vg_name"` + + // MountOptions are options used for the overlay mount (not used on bind mounts) + MountOptions []string `toml:"mount_options"` +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.SnapshotPlugin, + ID: "devbox", + Config: &Config{}, + InitFn: func(ic *plugin.InitContext) (any, error) { + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + + config, ok := ic.Config.(*Config) + if !ok { + return nil, errors.New("invalid devbox configuration") + } + + root := ic.Root + if config.RootPath != "" { + root = config.RootPath + } + + var oOpts []devbox.Opt + if config.UpperdirLabel { + oOpts = append(oOpts, devbox.WithUpperdirLabel) + } + if !config.SyncRemove { + oOpts = append(oOpts, devbox.AsynchronousRemove) + } + + if len(config.MountOptions) > 0 { + oOpts = append(oOpts, devbox.WithMountOptions(config.MountOptions)) + } + + if config.lvmVgName == "" { + // If no LVM VG name is provided, use the default. + config.lvmVgName = "ubuntu-vg" + } + oOpts = append(oOpts, devbox.WithLvmVgName(config.lvmVgName)) + + ic.Meta.Exports[plugin.SnapshotterRootDir] = root + return devbox.NewSnapshotter(root, oOpts...) + + }, + }) +} diff --git a/snapshots/devbox/storage/bolt.go b/snapshots/devbox/storage/bolt.go new file mode 100644 index 000000000000..a22581cac205 --- /dev/null +++ b/snapshots/devbox/storage/bolt.go @@ -0,0 +1,846 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package storage + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/snapshots" + bolt "go.etcd.io/bbolt" +) + +var ( + bucketKeyStorageVersion = []byte("v1") + bucketKeySnapshot = []byte("snapshots") + bucketKeyParents = []byte("parents") + + bucketKeyID = []byte("id") + bucketKeyParent = []byte("parent") + bucketKeyKind = []byte("kind") + bucketKeyInodes = []byte("inodes") + bucketKeySize = []byte("size") + + // ErrNoTransaction is returned when an operation is attempted with + // a context which is not inside of a transaction. + ErrNoTransaction = errors.New("no transaction in context") + + // modified by sealos + DevboxKeyContentID = []byte("content_id") + + DevboxStoragePathBucket = []byte("devbox_storage_path") + DevboxKeyPath = []byte("path") + DevboxKeyLvName = []byte("lv_name") + DevboxKeyStatus = []byte("status") + + DevboxStatusActive = []byte("active") + DevboxStatusRemoved = []byte("removed") +) + +// parentKey returns a composite key of the parent and child identifiers. The +// parts of the key are separated by a zero byte. +func parentKey(parent, child uint64) []byte { + b := make([]byte, binary.Size([]uint64{parent, child})+1) + i := binary.PutUvarint(b, parent) + j := binary.PutUvarint(b[i+1:], child) + return b[0 : i+j+1] +} + +// parentPrefixKey returns the parent part of the composite key with the +// zero byte separator. +func parentPrefixKey(parent uint64) []byte { + b := make([]byte, binary.Size(parent)+1) + i := binary.PutUvarint(b, parent) + return b[0 : i+1] +} + +// getParentPrefix returns the first part of the composite key which +// represents the parent identifier. +func getParentPrefix(b []byte) uint64 { + parent, _ := binary.Uvarint(b) + return parent +} + +// GetInfo returns the snapshot Info directly from the metadata. Requires a +// context with a storage transaction. 
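+// The returned string is the snapshot's internal numeric ID rendered in
+// decimal, matching the identifier handed out by CreateSnapshot.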
+func GetInfo(ctx context.Context, key string) (string, snapshots.Info, snapshots.Usage, error) { + var ( + id uint64 + su snapshots.Usage + si = snapshots.Info{ + Name: key, + } + ) + err := withSnapshotBucket(ctx, key, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + getUsage(bkt, &su) + return readSnapshot(bkt, &id, &si) + }) + if err != nil { + return "", snapshots.Info{}, snapshots.Usage{}, err + } + + return fmt.Sprintf("%d", id), si, su, nil +} + +// UpdateInfo updates an existing snapshot info's data +func UpdateInfo(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { + updated := snapshots.Info{ + Name: info.Name, + } + err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + sbkt := bkt.Bucket([]byte(info.Name)) + if sbkt == nil { + return fmt.Errorf("snapshot does not exist: %w", errdefs.ErrNotFound) + } + if err := readSnapshot(sbkt, nil, &updated); err != nil { + return err + } + + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + updated.Labels = info.Labels + default: + return fmt.Errorf("cannot update %q field on snapshot %q: %w", path, info.Name, errdefs.ErrInvalidArgument) + } + } + } else { + // Set mutable fields + updated.Labels = info.Labels + } + updated.Updated = time.Now().UTC() + if err := boltutil.WriteTimestamps(sbkt, updated.Created, updated.Updated); err != nil { + return err + } + + return boltutil.WriteLabels(sbkt, updated.Labels) + }) + if err != nil { + return snapshots.Info{}, err + } + return updated, nil +} + +// WalkInfo iterates through all metadata Info for the stored snapshots and +// calls the provided function for each. Requires a context with a storage +// transaction. +func WalkInfo(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error { + filter, err := filters.ParseAll(fs...) + if err != nil { + return err + } + // TODO: allow indexes (name, parent, specific labels) + return withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + return bkt.ForEach(func(k, v []byte) error { + // skip non buckets + if v != nil { + return nil + } + var ( + sbkt = bkt.Bucket(k) + si = snapshots.Info{ + Name: string(k), + } + ) + if err := readSnapshot(sbkt, nil, &si); err != nil { + return err + } + if !filter.Match(adaptSnapshot(si)) { + return nil + } + + return fn(ctx, si) + }) + }) +} + +// GetSnapshot returns the metadata for the active or view snapshot transaction +// referenced by the given key. Requires a context with a storage transaction. 
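+// Committed snapshots cannot be fetched through this call; requesting one
+// fails with errdefs.ErrFailedPrecondition.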
+func GetSnapshot(ctx context.Context, key string) (s Snapshot, err error) { + err = withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return fmt.Errorf("snapshot does not exist: %w", errdefs.ErrNotFound) + } + + s.ID = fmt.Sprintf("%d", readID(sbkt)) + s.Kind = readKind(sbkt) + + if s.Kind != snapshots.KindActive && s.Kind != snapshots.KindView { + return fmt.Errorf("requested snapshot %v not active or view: %w", key, errdefs.ErrFailedPrecondition) + } + + if parentKey := sbkt.Get(bucketKeyParent); len(parentKey) > 0 { + spbkt := bkt.Bucket(parentKey) + if spbkt == nil { + return fmt.Errorf("parent does not exist: %w", errdefs.ErrNotFound) + } + + s.ParentIDs, err = parents(bkt, spbkt, readID(spbkt)) + if err != nil { + return fmt.Errorf("failed to get parent chain: %w", err) + } + } + return nil + }) + if err != nil { + return Snapshot{}, err + } + + return +} + +// CreateSnapshot inserts a record for an active or view snapshot with the provided parent. +func CreateSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts ...snapshots.Opt) (s Snapshot, err error) { + switch kind { + case snapshots.KindActive, snapshots.KindView: + default: + return Snapshot{}, fmt.Errorf("snapshot type %v invalid; only snapshots of type Active or View can be created: %w", kind, errdefs.ErrInvalidArgument) + } + var base snapshots.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return Snapshot{}, err + } + } + + err = createBucketIfNotExists(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + var ( + spbkt *bolt.Bucket + ) + if parent != "" { + spbkt = bkt.Bucket([]byte(parent)) + if spbkt == nil { + return fmt.Errorf("missing parent %q bucket: %w", parent, errdefs.ErrNotFound) + } + + if readKind(spbkt) != snapshots.KindCommitted { + return fmt.Errorf("parent %q is not committed snapshot: %w", parent, errdefs.ErrInvalidArgument) + } + } + sbkt, err := bkt.CreateBucket([]byte(key)) + if err != nil { + if err == bolt.ErrBucketExists { + err = fmt.Errorf("snapshot %v: %w", key, errdefs.ErrAlreadyExists) + } + return err + } + + id, err := bkt.NextSequence() + if err != nil { + return fmt.Errorf("unable to get identifier for snapshot %q: %w", key, err) + } + + t := time.Now().UTC() + si := snapshots.Info{ + Parent: parent, + Kind: kind, + Labels: base.Labels, + Created: t, + Updated: t, + } + if err := putSnapshot(sbkt, id, si); err != nil { + return err + } + + if spbkt != nil { + pid := readID(spbkt) + + // Store a backlink from the key to the parent. Store the snapshot name + // as the value to allow following the backlink to the snapshot value. + if err := pbkt.Put(parentKey(pid, id), []byte(key)); err != nil { + return fmt.Errorf("failed to write parent link for snapshot %q: %w", key, err) + } + + s.ParentIDs, err = parents(bkt, spbkt, pid) + if err != nil { + return fmt.Errorf("failed to get parent chain for snapshot %q: %w", key, err) + } + } + + s.ID = fmt.Sprintf("%d", id) + s.Kind = kind + return nil + }) + if err != nil { + return Snapshot{}, err + } + + return +} + +// Remove removes a snapshot from the metastore. The string identifier for the +// snapshot is returned as well as the kind. The provided context must contain a +// writable transaction. 
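+// A snapshot that still has children cannot be removed; such a call fails
+// with errdefs.ErrFailedPrecondition.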
+func Remove(ctx context.Context, key string) (string, snapshots.Kind, error) { + var ( + id uint64 + si snapshots.Info + ) + + if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return fmt.Errorf("snapshot %v: %w", key, errdefs.ErrNotFound) + } + + if err := readSnapshot(sbkt, &id, &si); err != nil { + return fmt.Errorf("failed to read snapshot %s: %w", key, err) + } + + if pbkt != nil { + k, _ := pbkt.Cursor().Seek(parentPrefixKey(id)) + if getParentPrefix(k) == id { + return fmt.Errorf("cannot remove snapshot with child: %w", errdefs.ErrFailedPrecondition) + } + + if si.Parent != "" { + spbkt := bkt.Bucket([]byte(si.Parent)) + if spbkt == nil { + return fmt.Errorf("snapshot %v: %w", key, errdefs.ErrNotFound) + } + + if err := pbkt.Delete(parentKey(readID(spbkt), id)); err != nil { + return fmt.Errorf("failed to delete parent link: %w", err) + } + } + } + + if err := bkt.DeleteBucket([]byte(key)); err != nil { + return fmt.Errorf("failed to delete snapshot: %w", err) + } + + return nil + }); err != nil { + return "", 0, err + } + + return fmt.Sprintf("%d", id), si.Kind, nil +} + +// CommitActive renames the active snapshot transaction referenced by `key` +// as a committed snapshot referenced by `Name`. The resulting snapshot will be +// committed and readonly. The `key` reference will no longer be available for +// lookup or removal. The returned string identifier for the committed snapshot +// is the same identifier of the original active snapshot. The provided context +// must contain a writable transaction. +func CommitActive(ctx context.Context, key, name string, usage snapshots.Usage, opts ...snapshots.Opt) (string, error) { + var ( + id uint64 + base snapshots.Info + ) + for _, opt := range opts { + if err := opt(&base); err != nil { + return "", err + } + } + + if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + dbkt, err := bkt.CreateBucket([]byte(name)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errdefs.ErrAlreadyExists + } + return fmt.Errorf("committed snapshot %v: %w", name, err) + } + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return fmt.Errorf("failed to get active snapshot %q: %w", key, errdefs.ErrNotFound) + } + + var si snapshots.Info + if err := readSnapshot(sbkt, &id, &si); err != nil { + return fmt.Errorf("failed to read active snapshot %q: %w", key, err) + } + + if si.Kind != snapshots.KindActive { + return fmt.Errorf("snapshot %q is not active: %w", key, errdefs.ErrFailedPrecondition) + } + si.Kind = snapshots.KindCommitted + si.Created = time.Now().UTC() + si.Updated = si.Created + + // Replace labels, do not inherit + si.Labels = base.Labels + + if err := putSnapshot(dbkt, id, si); err != nil { + return err + } + if err := putUsage(dbkt, usage); err != nil { + return err + } + if err := bkt.DeleteBucket([]byte(key)); err != nil { + return fmt.Errorf("failed to delete active snapshot %q: %w", key, err) + } + if si.Parent != "" { + spbkt := bkt.Bucket([]byte(si.Parent)) + if spbkt == nil { + return fmt.Errorf("missing parent %q of snapshot %q: %w", si.Parent, key, errdefs.ErrNotFound) + } + pid := readID(spbkt) + + // Updates parent back link to use new key + if err := pbkt.Put(parentKey(pid, id), []byte(name)); err != nil { + return fmt.Errorf("failed to update parent link %q from %q to %q: %w", pid, key, name, err) + } + } + + return nil + }); err != nil { + return "", err + } + + return fmt.Sprintf("%d", id), nil 
+} + +// IDMap returns all the IDs mapped to their key +func IDMap(ctx context.Context) (map[string]string, error) { + m := map[string]string{} + if err := withBucket(ctx, func(ctx context.Context, bkt, _ *bolt.Bucket) error { + return bkt.ForEach(func(k, v []byte) error { + // skip non buckets + if v != nil { + return nil + } + id := readID(bkt.Bucket(k)) + m[fmt.Sprintf("%d", id)] = string(k) + return nil + }) + }); err != nil { + return nil, err + } + + return m, nil +} + +func withSnapshotBucket(ctx context.Context, key string, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return ErrNoTransaction + } + vbkt := tx.Bucket(bucketKeyStorageVersion) + if vbkt == nil { + return fmt.Errorf("bucket does not exist: %w", errdefs.ErrNotFound) + } + bkt := vbkt.Bucket(bucketKeySnapshot) + if bkt == nil { + return fmt.Errorf("snapshots bucket does not exist: %w", errdefs.ErrNotFound) + } + bkt = bkt.Bucket([]byte(key)) + if bkt == nil { + return fmt.Errorf("snapshot does not exist: %w", errdefs.ErrNotFound) + } + + return fn(ctx, bkt, vbkt.Bucket(bucketKeyParents)) +} + +func withBucket(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return ErrNoTransaction + } + bkt := tx.Bucket(bucketKeyStorageVersion) + if bkt == nil { + return fmt.Errorf("bucket does not exist: %w", errdefs.ErrNotFound) + } + return fn(ctx, bkt.Bucket(bucketKeySnapshot), bkt.Bucket(bucketKeyParents)) +} + +func createBucketIfNotExists(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return ErrNoTransaction + } + + bkt, err := tx.CreateBucketIfNotExists(bucketKeyStorageVersion) + if err != nil { + return fmt.Errorf("failed to create version bucket: %w", err) + } + sbkt, err := bkt.CreateBucketIfNotExists(bucketKeySnapshot) + if err != nil { + return fmt.Errorf("failed to create snapshots bucket: %w", err) + } + pbkt, err := bkt.CreateBucketIfNotExists(bucketKeyParents) + if err != nil { + return fmt.Errorf("failed to create parents bucket: %w", err) + } + return fn(ctx, sbkt, pbkt) +} + +func parents(bkt, pbkt *bolt.Bucket, parent uint64) (parents []string, err error) { + for { + parents = append(parents, fmt.Sprintf("%d", parent)) + + parentKey := pbkt.Get(bucketKeyParent) + if len(parentKey) == 0 { + return + } + pbkt = bkt.Bucket(parentKey) + if pbkt == nil { + return nil, fmt.Errorf("missing parent: %w", errdefs.ErrNotFound) + } + + parent = readID(pbkt) + } +} + +func readKind(bkt *bolt.Bucket) (k snapshots.Kind) { + kind := bkt.Get(bucketKeyKind) + if len(kind) == 1 { + k = snapshots.Kind(kind[0]) + } + return +} + +func readID(bkt *bolt.Bucket) uint64 { + id, _ := binary.Uvarint(bkt.Get(bucketKeyID)) + return id +} + +func readSnapshot(bkt *bolt.Bucket, id *uint64, si *snapshots.Info) error { + if id != nil { + *id = readID(bkt) + } + if si != nil { + si.Kind = readKind(bkt) + si.Parent = string(bkt.Get(bucketKeyParent)) + + if err := boltutil.ReadTimestamps(bkt, &si.Created, &si.Updated); err != nil { + return err + } + + labels, err := boltutil.ReadLabels(bkt) + if err != nil { + return err + } + si.Labels = labels + } + + return nil +} + +func putSnapshot(bkt *bolt.Bucket, id uint64, si snapshots.Info) error { + idEncoded, err := encodeID(id) + if err != nil { + return err + } + + updates := [][2][]byte{ + {bucketKeyID, 
idEncoded}, + {bucketKeyKind, []byte{byte(si.Kind)}}, + } + if si.Parent != "" { + updates = append(updates, [2][]byte{bucketKeyParent, []byte(si.Parent)}) + } + for _, v := range updates { + if err := bkt.Put(v[0], v[1]); err != nil { + return err + } + } + if err := boltutil.WriteTimestamps(bkt, si.Created, si.Updated); err != nil { + return err + } + return boltutil.WriteLabels(bkt, si.Labels) +} + +func getUsage(bkt *bolt.Bucket, usage *snapshots.Usage) { + usage.Inodes, _ = binary.Varint(bkt.Get(bucketKeyInodes)) + usage.Size, _ = binary.Varint(bkt.Get(bucketKeySize)) +} + +func putUsage(bkt *bolt.Bucket, usage snapshots.Usage) error { + for _, v := range []struct { + key []byte + value int64 + }{ + {bucketKeyInodes, usage.Inodes}, + {bucketKeySize, usage.Size}, + } { + e, err := encodeSize(v.value) + if err != nil { + return err + } + if err := bkt.Put(v.key, e); err != nil { + return err + } + } + return nil +} + +func encodeSize(size int64) ([]byte, error) { + var ( + buf [binary.MaxVarintLen64]byte + sizeEncoded = buf[:] + ) + sizeEncoded = sizeEncoded[:binary.PutVarint(sizeEncoded, size)] + + if len(sizeEncoded) == 0 { + return nil, fmt.Errorf("failed encoding size = %v", size) + } + return sizeEncoded, nil +} + +func encodeID(id uint64) ([]byte, error) { + var ( + buf [binary.MaxVarintLen64]byte + idEncoded = buf[:] + ) + idEncoded = idEncoded[:binary.PutUvarint(idEncoded, id)] + + if len(idEncoded) == 0 { + return nil, fmt.Errorf("failed encoding id = %v", id) + } + return idEncoded, nil +} + +func adaptSnapshot(info snapshots.Info) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "kind": + switch info.Kind { + case snapshots.KindActive: + return "active", true + case snapshots.KindView: + return "view", true + case snapshots.KindCommitted: + return "committed", true + } + case "name": + return info.Name, true + case "parent": + return info.Parent, true + case "labels": + if len(info.Labels) == 0 { + return "", false + } + + v, ok := info.Labels[strings.Join(fieldpath[1:], ".")] + return v, ok + } + + return "", false + }) +} + +// modified by sealos +func withDevboxBucket(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return ErrNoTransaction + } + + vbkt := tx.Bucket(bucketKeyStorageVersion) + if vbkt == nil { + return fmt.Errorf("bucket does not exist: %w", errdefs.ErrNotFound) + } + + bkt := vbkt.Bucket(DevboxStoragePathBucket) + if bkt == nil { + // Create the devbox storage path bucket if it does not exist + var err error + bkt, err = vbkt.CreateBucketIfNotExists(DevboxStoragePathBucket) + if err != nil { + return fmt.Errorf("failed to create devbox storage path bucket: %w", err) + } + } + + dbkt := bkt.Bucket(DevboxStoragePathBucket) + if dbkt == nil { + // Create the devbox storage path bucket if it does not exist + var err error + dbkt, err = bkt.CreateBucketIfNotExists(DevboxStoragePathBucket) + if err != nil { + return fmt.Errorf("failed to create devbox storage path bucket: %w", err) + } + } + + return fn(ctx, bkt, dbkt) +} + +func GetDevboxLvName(ctx context.Context, contentKey string) (string, error) { + var ( + lvName string + ) + + if contentKey == "" { + return "", fmt.Errorf("content key cannot be empty") + } + + err := withDevboxBucket(ctx, func(ctx context.Context, _ *bolt.Bucket, dbkt *bolt.Bucket) error { + fmt.Printf("devbox 
storage path bucket1\n") + if dbkt == nil { + return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) + } + sdbkt := dbkt.Bucket([]byte(contentKey)) + fmt.Printf("devbox storage path bucket2: %s\n", contentKey) + if sdbkt == nil { + return errdefs.ErrNotFound + } + + lvNameByte := sdbkt.Get(DevboxKeyLvName) + // if len(lvName) == 0 { // no need for this check, as snapshotter will recreate the LVM if lvName is empty + // return fmt.Errorf("LVM name for content key %s not found: %w", contentKey, errdefs.ErrNotFound) + // } + lvName = string(lvNameByte) + fmt.Printf("lvName: %s\n", lvName) + return nil + }) + if err != nil { + return "", err + } + + return lvName, nil +} + +func SetDevboxContent(ctx context.Context, key, contentKey, lvName, path string) error { + if contentKey == "" || lvName == "" || path == "" { + return fmt.Errorf("content key and storage path cannot be empty") + } + + return withDevboxBucket(ctx, func(ctx context.Context, bkt *bolt.Bucket, dbkt *bolt.Bucket) error { + bkt = bkt.Bucket([]byte(key)) + if bkt == nil { + return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) + } + fmt.Printf("devbox storage path bucket: %s\n", key) + bkt.Put(DevboxKeyContentID, []byte(contentKey)) + if dbkt == nil { + return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) + } + sdbkt, err := dbkt.CreateBucketIfNotExists([]byte(contentKey)) + if err != nil { + return fmt.Errorf("failed to create bucket for content key %s: %w", contentKey, err) + } + if err := sdbkt.Put([]byte(DevboxKeyLvName), []byte(lvName)); err != nil { + return fmt.Errorf("failed to set storage path for content key %s: %w", contentKey, err) + } + if err := sdbkt.Put([]byte(DevboxKeyPath), []byte(path)); err != nil { + return fmt.Errorf("failed to set storage path for content key %s: %w", contentKey, err) + } + if err := sdbkt.Put([]byte(DevboxKeyStatus), []byte(DevboxStatusActive)); err != nil { + return fmt.Errorf("failed to set status for content key %s: %w", contentKey, err) + } + return nil + }) +} + +func SetDevboxContentStatusRemove(ctx context.Context, contentKey string) error { + if contentKey == "" { + return fmt.Errorf("content key cannot be empty") + } + + return withDevboxBucket(ctx, func(ctx context.Context, _ *bolt.Bucket, dbkt *bolt.Bucket) error { + sdbkt := dbkt.Bucket([]byte(contentKey)) + if sdbkt == nil { + return fmt.Errorf("devbox storage path bucket for content key %s does not exist: %w", contentKey, errdefs.ErrNotFound) + } + if err := sdbkt.Put(DevboxKeyStatus, DevboxStatusRemoved); err != nil { + return fmt.Errorf("failed to set status for content key %s: %w", contentKey, err) + } + fmt.Printf("Set devbox content status for key: %s, status: %s\n", contentKey, DevboxStatusRemoved) + return nil + }) +} + +func RemoveDevboxContent(ctx context.Context, Key string) (string, error) { + var ( + mountPath string + ) + fmt.Printf("Removing devbox content for key: %s\n", Key) + if Key == "" { + return "", fmt.Errorf("content key cannot be empty") + } + + err := withDevboxBucket(ctx, func(ctx context.Context, bkt *bolt.Bucket, dbkt *bolt.Bucket) error { + if bkt == nil { + return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) + } + sbkt := bkt.Bucket([]byte(Key)) + if sbkt == nil { + return errdefs.ErrNotFound + } + contentID := sbkt.Get(DevboxKeyContentID) + fmt.Printf("devbox storage path bucket for key: %s, content ID: %s\n", Key, string(contentID)) + mountPath = 
string(sbkt.Get(DevboxKeyPath)) + if contentID == nil { + // return fmt.Errorf("content ID for key %s not found: %w", Key, errdefs.ErrNotFound) + } + if err := bkt.DeleteBucket([]byte(Key)); err != nil { + if errors.Is(err, bolt.ErrBucketNotFound) { + return fmt.Errorf("storage path for content key %s not found: %w", Key, errdefs.ErrNotFound) + } + return fmt.Errorf("failed to delete storage path for content key %s: %w", Key, err) + } + sdbkt := dbkt.Bucket([]byte(contentID)) + if sdbkt == nil { + return fmt.Errorf("devbox storage path bucket for content ID %s does not exist: %w", string(contentID), errdefs.ErrNotFound) + } + if status := sdbkt.Get(DevboxKeyStatus); status != nil && string(status) == string(DevboxStatusRemoved) { + dbkt.Delete([]byte(contentID)) + fmt.Printf("Removed devbox content for key: %s, content ID: %s\n", Key, string(contentID)) + return nil + } + return nil + }) + if err != nil { + return "", err + } + fmt.Printf("Removed devbox content for key: %s, mount path: %s\n", Key, mountPath) + return mountPath, nil +} + +// IDMap returns all the IDs mapped to their key +func GetDevboxLvNames(ctx context.Context) (map[string]string, error) { + m := map[string]string{} + if err := withDevboxBucket(ctx, func(ctx context.Context, _ *bolt.Bucket, dbkt *bolt.Bucket) error { + return dbkt.ForEach(func(k, v []byte) error { + // skip non buckets + if v != nil { + return nil + } + path := dbkt.Bucket(k).Get(DevboxKeyPath) + m[string(path)] = string(k) + return nil + }) + }); err != nil { + return nil, err + } + + return m, nil +} diff --git a/snapshots/devbox/storage/metastore.go b/snapshots/devbox/storage/metastore.go new file mode 100644 index 000000000000..2f7beac1be1c --- /dev/null +++ b/snapshots/devbox/storage/metastore.go @@ -0,0 +1,157 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package storage provides a metadata storage implementation for snapshot +// drivers. Drive implementations are responsible for starting and managing +// transactions using the defined context creator. This storage package uses +// BoltDB for storing metadata. Access to the raw boltdb transaction is not +// provided, but the stored object is provided by the proto subpackage. +package storage + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/containerd/containerd/snapshots" + "github.com/containerd/log" + bolt "go.etcd.io/bbolt" +) + +// Transactor is used to finalize an active transaction. +type Transactor interface { + // Commit commits any changes made during the transaction. On error a + // caller is expected to clean up any resources which would have relied + // on data mutated as part of this transaction. Only writable + // transactions can commit, non-writable must call Rollback. + Commit() error + + // Rollback rolls back any changes made during the transaction. This + // must be called on all non-writable transactions and aborted writable + // transaction. 
+ Rollback() error +} + +// Snapshot hold the metadata for an active or view snapshot transaction. The +// ParentIDs hold the snapshot identifiers for the committed snapshots this +// active or view is based on. The ParentIDs are ordered from the lowest base +// to highest, meaning they should be applied in order from the first index to +// the last index. The last index should always be considered the active +// snapshots immediate parent. +type Snapshot struct { + Kind snapshots.Kind + ID string + ParentIDs []string +} + +// MetaStore is used to store metadata related to a snapshot driver. The +// MetaStore is intended to store metadata related to name, state and +// parentage. Using the MetaStore is not required to implement a snapshot +// driver but can be used to handle the persistence and transactional +// complexities of a driver implementation. +type MetaStore struct { + dbfile string + + dbL sync.Mutex + db *bolt.DB +} + +// NewMetaStore returns a snapshot MetaStore for storage of metadata related to +// a snapshot driver backed by a bolt file database. This implementation is +// strongly consistent and does all metadata changes in a transaction to prevent +// against process crashes causing inconsistent metadata state. +func NewMetaStore(dbfile string) (*MetaStore, error) { + return &MetaStore{ + dbfile: dbfile, + }, nil +} + +type transactionKey struct{} + +// TransactionContext creates a new transaction context. The writable value +// should be set to true for transactions which are expected to mutate data. +func (ms *MetaStore) TransactionContext(ctx context.Context, writable bool) (context.Context, Transactor, error) { + ms.dbL.Lock() + if ms.db == nil { + db, err := bolt.Open(ms.dbfile, 0600, nil) + if err != nil { + ms.dbL.Unlock() + return ctx, nil, fmt.Errorf("failed to open database file: %w", err) + } + ms.db = db + } + ms.dbL.Unlock() + + tx, err := ms.db.Begin(writable) + if err != nil { + return ctx, nil, fmt.Errorf("failed to start transaction: %w", err) + } + + ctx = context.WithValue(ctx, transactionKey{}, tx) + + return ctx, tx, nil +} + +// TransactionCallback represents a callback to be invoked while under a metastore transaction. +type TransactionCallback func(ctx context.Context) error + +// WithTransaction is a convenience method to run a function `fn` while holding a meta store transaction. +// If the callback `fn` returns an error or the transaction is not writable, the database transaction will be discarded. 
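+//
+// A typical (hypothetical) caller wraps the bolt-backed helpers from this
+// package in a writable transaction, for example:
+//
+//	err := ms.WithTransaction(ctx, true, func(ctx context.Context) error {
+//		_, err := CreateSnapshot(ctx, snapshots.KindActive, key, parent)
+//		return err
+//	})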
+func (ms *MetaStore) WithTransaction(ctx context.Context, writable bool, fn TransactionCallback) error { + ctx, trans, err := ms.TransactionContext(ctx, writable) + if err != nil { + return err + } + + var result []error + err = fn(ctx) + if err != nil { + result = append(result, err) + } + + // Always rollback if transaction is not writable + if err != nil || !writable { + if terr := trans.Rollback(); terr != nil { + log.G(ctx).WithError(terr).Error("failed to rollback transaction") + + result = append(result, fmt.Errorf("rollback failed: %w", terr)) + } + } else { + if terr := trans.Commit(); terr != nil { + log.G(ctx).WithError(terr).Error("failed to commit transaction") + + result = append(result, fmt.Errorf("commit failed: %w", terr)) + } + } + + if err := errors.Join(result...); err != nil { + log.G(ctx).WithError(err).Debug("snapshotter error") + return err + } + + return nil +} + +// Close closes the metastore and any underlying database connections +func (ms *MetaStore) Close() error { + ms.dbL.Lock() + defer ms.dbL.Unlock() + if ms.db == nil { + return nil + } + return ms.db.Close() +} diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 02a73ccfd1ae..352018e70370 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,10 @@ # Change history of go-restful +## [v3.10.2] - 2023-03-09 + +- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 + see comment in Readme how to customize this behaviour. + ## [v3.10.1] - 2022-11-19 - fix broken 3.10.0 by using path package for joining paths diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 0625359dc409..85da90128e4e 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -96,6 +96,10 @@ There are several hooks to customize the behavior of the go-restful package. - Compression - Encoders for other serializers - Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` +- Use the variable `MergePathStrategy` to change the behaviour of composing the Route path given a root path and a local route path + - versions >= 3.10.1 has set the value to `PathJoinStrategy` that fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services not to work correctly anymore. + - versions <= 3.9 had the behaviour that can be restored in newer versions by setting the value to `TrimSlashStrategy`. 
+ - you can set value to a custom implementation (must implement MergePathStrategyFunc) ## Resources diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go index 830ebf148ed1..827f471de000 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -353,8 +353,28 @@ func (b *RouteBuilder) Build() Route { return route } -func concatPath(path1, path2 string) string { - return path.Join(path1, path2) +type MergePathStrategyFunc func(rootPath, routePath string) string + +var ( + // behavior >= 3.10 + PathJoinStrategy = func(rootPath, routePath string) string { + return path.Join(rootPath, routePath) + } + + // behavior <= 3.9 + TrimSlashStrategy = func(rootPath, routePath string) string { + return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } + + // MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices. + // The value is set to PathJoinStrategy + // PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227] + MergePathStrategy = PathJoinStrategy +) + +// merge two paths using the current (package global) merge path strategy. +func concatPath(rootPath, routePath string) string { + return MergePathStrategy(rootPath, routePath) } var anonymousFuncCount int32 diff --git a/vendor/github.com/klauspost/compress/flate/_gen/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/_gen/gen_inflate.go deleted file mode 100644 index 64ff1aad4ce5..000000000000 --- a/vendor/github.com/klauspost/compress/flate/_gen/gen_inflate.go +++ /dev/null @@ -1,303 +0,0 @@ -//go:build generate -// +build generate - -//go:generate go run $GOFILE -//go:generate go fmt ../inflate_gen.go - -package main - -import ( - "os" - "strings" -) - -func main() { - f, err := os.Create("../inflate_gen.go") - if err != nil { - panic(err) - } - defer f.Close() - types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader", "Reader"} - names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader", "GenericReader"} - imports := []string{"bytes", "bufio", "fmt", "strings", "math/bits"} - f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT. - -package flate - -import ( -`) - - for _, imp := range imports { - f.WriteString("\t\"" + imp + "\"\n") - } - f.WriteString(")\n\n") - - template := ` - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) $FUNCNAME$() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.($TYPE$) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. 
- { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).$FUNCNAME$ - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb®SizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb®SizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).$FUNCNAME$ // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -` - for i, t := range types { - s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1) - s = strings.Replace(s, "$TYPE$", t, -1) - f.WriteString(s) - } - f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n") - f.WriteString("\tswitch f.r.(type) {\n") - for i, t := range types { - f.WriteString("\t\tcase " + t + ":\n") - f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n") - } - f.WriteString("\t\tdefault:\n") - f.WriteString("\t\t\treturn f.huffmanGenericReader\n") - f.WriteString("\t}\n}\n") -} diff --git a/vendor/github.com/klauspost/compress/flate/deflate_test.go b/vendor/github.com/klauspost/compress/flate/deflate_test.go deleted file mode 100644 index f9584ceb3ae3..000000000000 --- a/vendor/github.com/klauspost/compress/flate/deflate_test.go +++ /dev/null @@ -1,665 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Copyright (c) 2015 Klaus Post -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package flate - -import ( - "bytes" - "fmt" - "io" - "os" - "reflect" - "strings" - "sync" - "testing" -) - -type deflateTest struct { - in []byte - level int - out []byte -} - -type deflateInflateTest struct { - in []byte -} - -type reverseBitsTest struct { - in uint16 - bitCount uint8 - out uint16 -} - -var deflateTests = []*deflateTest{ - 0: {[]byte{}, 0, []byte{0x3, 0x0}}, - 1: {[]byte{0x11}, BestCompression, []byte{0x12, 0x4, 0xc, 0x0}}, - 2: {[]byte{0x11}, BestCompression, []byte{0x12, 0x4, 0xc, 0x0}}, - 3: {[]byte{0x11}, BestCompression, []byte{0x12, 0x4, 0xc, 0x0}}, - - 4: {[]byte{0x11}, 0, []byte{0x0, 0x1, 0x0, 0xfe, 0xff, 0x11, 0x3, 0x0}}, - 5: {[]byte{0x11, 0x12}, 0, []byte{0x0, 0x2, 0x0, 0xfd, 0xff, 0x11, 0x12, 0x3, 0x0}}, - 6: {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 0, - []byte{0x0, 0x8, 0x0, 0xf7, 0xff, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x3, 0x0}, - }, - 7: {[]byte{}, 1, []byte{0x3, 0x0}}, - 8: {[]byte{0x11}, BestCompression, []byte{0x12, 0x4, 0xc, 0x0}}, - 9: {[]byte{0x11, 0x12}, BestCompression, []byte{0x12, 0x14, 0x2, 0xc, 0x0}}, - 10: {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, BestCompression, []byte{0x12, 0x84, 0x1, 0xc0, 0x0}}, - 11: {[]byte{}, 9, []byte{0x3, 0x0}}, - 12: {[]byte{0x11}, 9, []byte{0x12, 0x4, 0xc, 0x0}}, - 13: {[]byte{0x11, 0x12}, 9, []byte{0x12, 0x14, 0x2, 0xc, 0x0}}, - 14: {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 9, []byte{0x12, 0x84, 0x1, 0xc0, 0x0}}, -} - -var deflateInflateTests = []*deflateInflateTest{ - {[]byte{}}, - {[]byte{0x11}}, - {[]byte{0x11, 0x12}}, - {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}}, - {[]byte{0x11, 0x10, 0x13, 0x41, 0x21, 0x21, 0x41, 0x13, 0x87, 0x78, 0x13}}, - {largeDataChunk()}, -} - -var reverseBitsTests = []*reverseBitsTest{ - {1, 1, 1}, - {1, 2, 2}, - {1, 3, 4}, - {1, 4, 8}, - {1, 5, 16}, - {17, 5, 17}, - {257, 9, 257}, - {29, 5, 23}, -} - -func largeDataChunk() []byte { - result := make([]byte, 100000) - for i := range result { - result[i] = byte(i * i & 0xFF) - } - return result -} - -func TestBulkHash4(t *testing.T) { - for _, x := range deflateTests { - y := x.out - if len(y) >= minMatchLength { - y = append(y, y...) - for j := 4; j < len(y); j++ { - y := y[:j] - dst := make([]uint32, len(y)-minMatchLength+1) - for i := range dst { - dst[i] = uint32(i + 100) - } - bulkHash4(y, dst) - for i, val := range dst { - got := val - expect := hash4(y[i:]) - if got != expect && got == uint32(i)+100 { - t.Errorf("Len:%d Index:%d, expected 0x%08x but not modified", len(y), i, expect) - } else if got != expect { - t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, got, expect) - } else { - //t.Logf("Len:%d Index:%d OK (0x%08x)", len(y), i, got) - } - } - } - } - } -} - -func TestDeflate(t *testing.T) { - for i, h := range deflateTests { - var buf bytes.Buffer - w, err := NewWriter(&buf, h.level) - if err != nil { - t.Errorf("NewWriter: %v", err) - continue - } - w.Write(h.in) - w.Close() - if !bytes.Equal(buf.Bytes(), h.out) { - t.Errorf("%d: Deflate(%d, %x) got \n%#v, want \n%#v", i, h.level, h.in, buf.Bytes(), h.out) - } - } -} - -// A sparseReader returns a stream consisting of 0s followed by 1<<16 1s. -// This tests missing hash references in a very large input. 
-type sparseReader struct { - l int64 - cur int64 -} - -func (r *sparseReader) Read(b []byte) (n int, err error) { - if r.cur >= r.l { - return 0, io.EOF - } - n = len(b) - cur := r.cur + int64(n) - if cur > r.l { - n -= int(cur - r.l) - cur = r.l - } - for i := range b[0:n] { - if r.cur+int64(i) >= r.l-1<<16 { - b[i] = 1 - } else { - b[i] = 0 - } - } - r.cur = cur - return -} - -func TestVeryLongSparseChunk(t *testing.T) { - if testing.Short() { - t.Skip("skipping sparse chunk during short test") - } - var buf bytes.Buffer - w, err := NewWriter(&buf, 1) - if err != nil { - t.Errorf("NewWriter: %v", err) - return - } - if _, err = io.Copy(w, &sparseReader{l: 23e8}); err != nil { - t.Errorf("Compress failed: %v", err) - return - } - t.Log("Length:", buf.Len()) -} - -type syncBuffer struct { - buf bytes.Buffer - mu sync.RWMutex - closed bool - ready chan bool -} - -func newSyncBuffer() *syncBuffer { - return &syncBuffer{ready: make(chan bool, 1)} -} - -func (b *syncBuffer) Read(p []byte) (n int, err error) { - for { - b.mu.RLock() - n, err = b.buf.Read(p) - b.mu.RUnlock() - if n > 0 || b.closed { - return - } - <-b.ready - } -} - -func (b *syncBuffer) signal() { - select { - case b.ready <- true: - default: - } -} - -func (b *syncBuffer) Write(p []byte) (n int, err error) { - n, err = b.buf.Write(p) - b.signal() - return -} - -func (b *syncBuffer) WriteMode() { - b.mu.Lock() -} - -func (b *syncBuffer) ReadMode() { - b.mu.Unlock() - b.signal() -} - -func (b *syncBuffer) Close() error { - b.closed = true - b.signal() - return nil -} - -func testSync(t *testing.T, level int, input []byte, name string) { - if len(input) == 0 { - return - } - - t.Logf("--testSync %d, %d, %s", level, len(input), name) - buf := newSyncBuffer() - buf1 := new(bytes.Buffer) - buf.WriteMode() - w, err := NewWriter(io.MultiWriter(buf, buf1), level) - if err != nil { - t.Errorf("NewWriter: %v", err) - return - } - r := NewReader(buf) - - // Write half the input and read back. - for i := 0; i < 2; i++ { - var lo, hi int - if i == 0 { - lo, hi = 0, (len(input)+1)/2 - } else { - lo, hi = (len(input)+1)/2, len(input) - } - t.Logf("#%d: write %d-%d", i, lo, hi) - if _, err := w.Write(input[lo:hi]); err != nil { - t.Errorf("testSync: write: %v", err) - return - } - if i == 0 { - if err := w.Flush(); err != nil { - t.Errorf("testSync: flush: %v", err) - return - } - } else { - if err := w.Close(); err != nil { - t.Errorf("testSync: close: %v", err) - } - } - buf.ReadMode() - out := make([]byte, hi-lo+1) - m, err := io.ReadAtLeast(r, out, hi-lo) - t.Logf("#%d: read %d", i, m) - if m != hi-lo || err != nil { - t.Errorf("testSync/%d (%d, %d, %s): read %d: %d, %v (%d left)", i, level, len(input), name, hi-lo, m, err, buf.buf.Len()) - return - } - if !bytes.Equal(input[lo:hi], out[:hi-lo]) { - t.Errorf("testSync/%d: read wrong bytes: %x vs %x", i, input[lo:hi], out[:hi-lo]) - return - } - // This test originally checked that after reading - // the first half of the input, there was nothing left - // in the read buffer (buf.buf.Len() != 0) but that is - // not necessarily the case: the write Flush may emit - // some extra framing bits that are not necessary - // to process to obtain the first half of the uncompressed - // data. The test ran correctly most of the time, because - // the background goroutine had usually read even - // those extra bits by now, but it's not a useful thing to - // check. 
- buf.WriteMode() - } - buf.ReadMode() - out := make([]byte, 10) - if n, err := r.Read(out); n > 0 || err != io.EOF { - t.Errorf("testSync (%d, %d, %s): final Read: %d, %v (hex: %x)", level, len(input), name, n, err, out[0:n]) - } - if buf.buf.Len() != 0 { - t.Errorf("testSync (%d, %d, %s): extra data at end", level, len(input), name) - } - r.Close() - - // stream should work for ordinary reader too - r = NewReader(buf1) - out, err = io.ReadAll(r) - if err != nil { - t.Errorf("testSync: read: %s", err) - return - } - r.Close() - if !bytes.Equal(input, out) { - t.Errorf("testSync: decompress(compress(data)) != data: level=%d input=%s", level, name) - } -} - -func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name string, limit int) { - var buffer bytes.Buffer - w, err := NewWriter(&buffer, level) - if err != nil { - t.Errorf("NewWriter: %v", err) - return - } - w.Write(input) - w.Close() - if limit > 0 { - t.Logf("level: %d - Size:%.2f%%, %d b\n", level, float64(buffer.Len()*100)/float64(limit), buffer.Len()) - } - if limit > 0 && buffer.Len() > limit { - t.Errorf("level: %d, len(compress(data)) = %d > limit = %d", level, buffer.Len(), limit) - } - - r := NewReader(&buffer) - out, err := io.ReadAll(r) - if err != nil { - t.Errorf("read: %s", err) - return - } - r.Close() - if !bytes.Equal(input, out) { - os.WriteFile("testdata/fails/"+t.Name()+".got", out, os.ModePerm) - os.WriteFile("testdata/fails/"+t.Name()+".want", input, os.ModePerm) - t.Errorf("decompress(compress(data)) != data: level=%d input=%s", level, name) - return - } - testSync(t, level, input, name) -} - -func testToFromWithLimit(t *testing.T, input []byte, name string, limit [11]int) { - for i := 0; i < 10; i++ { - testToFromWithLevelAndLimit(t, i, input, name, limit[i]) - } - testToFromWithLevelAndLimit(t, -2, input, name, limit[10]) -} - -func TestDeflateInflate(t *testing.T) { - for i, h := range deflateInflateTests { - testToFromWithLimit(t, h.in, fmt.Sprintf("#%d", i), [11]int{}) - } -} - -func TestReverseBits(t *testing.T) { - for _, h := range reverseBitsTests { - if v := reverseBits(h.in, h.bitCount); v != h.out { - t.Errorf("reverseBits(%v,%v) = %v, want %v", - h.in, h.bitCount, v, h.out) - } - } -} - -type deflateInflateStringTest struct { - filename string - label string - limit [11]int // Number 11 is ConstantCompression -} - -var deflateInflateStringTests = []deflateInflateStringTest{ - { - "../testdata/e.txt", - "2.718281828...", - [...]int{100018, 67900, 50960, 51150, 50930, 50790, 50790, 50790, 50790, 50790, 43683 + 100}, - }, - { - "../testdata/Mark.Twain-Tom.Sawyer.txt", - "Mark.Twain-Tom.Sawyer", - [...]int{387999, 185000, 182361, 179974, 174124, 168819, 162936, 160506, 160295, 160295, 233460 + 100}, - }, -} - -func TestDeflateInflateString(t *testing.T) { - for _, test := range deflateInflateStringTests { - gold, err := os.ReadFile(test.filename) - if err != nil { - t.Error(err) - } - // Remove returns that may be present on Windows - neutral := strings.Map(func(r rune) rune { - if r != '\r' { - return r - } - return -1 - }, string(gold)) - - testToFromWithLimit(t, []byte(neutral), test.label, test.limit) - - if testing.Short() { - break - } - } -} - -func TestReaderDict(t *testing.T) { - const ( - dict = "hello world" - text = "hello again world" - ) - var b bytes.Buffer - w, err := NewWriter(&b, 5) - if err != nil { - t.Fatalf("NewWriter: %v", err) - } - w.Write([]byte(dict)) - w.Flush() - b.Reset() - w.Write([]byte(text)) - w.Close() - - r := NewReaderDict(&b, []byte(dict)) - data, 
err := io.ReadAll(r) - if err != nil { - t.Fatal(err) - } - if string(data) != "hello again world" { - t.Fatalf("read returned %q want %q", string(data), text) - } -} - -func TestWriterDict(t *testing.T) { - const ( - dict = "hello world Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua." - text = "hello world Lorem ipsum dolor sit amet" - ) - // This test is sensitive to algorithm changes that skip - // data in favour of speed. Higher levels are less prone to this - // so we test level 4-9. - for l := 4; l < 9; l++ { - var b bytes.Buffer - w, err := NewWriter(&b, l) - if err != nil { - t.Fatalf("level %d, NewWriter: %v", l, err) - } - w.Write([]byte(dict)) - w.Flush() - b.Reset() - w.Write([]byte(text)) - w.Close() - - var b1 bytes.Buffer - w, _ = NewWriterDict(&b1, l, []byte(dict)) - w.Write([]byte(text)) - w.Close() - - if !bytes.Equal(b1.Bytes(), b.Bytes()) { - t.Errorf("level %d, writer wrote\n%v\n want\n%v", l, b1.Bytes(), b.Bytes()) - } - } -} - -// See http://code.google.com/p/go/issues/detail?id=2508 -func TestRegression2508(t *testing.T) { - if testing.Short() { - t.Logf("test disabled with -short") - return - } - w, err := NewWriter(io.Discard, 1) - if err != nil { - t.Fatalf("NewWriter: %v", err) - } - buf := make([]byte, 1024) - for i := 0; i < 131072; i++ { - if _, err := w.Write(buf); err != nil { - t.Fatalf("writer failed: %v", err) - } - } - w.Close() -} - -func TestWriterReset(t *testing.T) { - for level := -2; level <= 9; level++ { - if level == -1 { - level++ - } - if testing.Short() && level > 1 { - break - } - w, err := NewWriter(io.Discard, level) - if err != nil { - t.Fatalf("NewWriter: %v", err) - } - buf := []byte("hello world") - for i := 0; i < 1024; i++ { - w.Write(buf) - } - w.Reset(io.Discard) - - wref, err := NewWriter(io.Discard, level) - if err != nil { - t.Fatalf("NewWriter: %v", err) - } - - // DeepEqual doesn't compare functions. - w.d.fill, wref.d.fill = nil, nil - w.d.step, wref.d.step = nil, nil - w.d.state, wref.d.state = nil, nil - w.d.fast, wref.d.fast = nil, nil - - // hashMatch is always overwritten when used. - if w.d.tokens.n != 0 { - t.Errorf("level %d Writer not reset after Reset. %d tokens were present", level, w.d.tokens.n) - } - // As long as the length is 0, we don't care about the content. 
- w.d.tokens = wref.d.tokens - - // We don't care if there are values in the window, as long as it is at d.index is 0 - w.d.window = wref.d.window - if !reflect.DeepEqual(w, wref) { - t.Errorf("level %d Writer not reset after Reset", level) - } - } - - for i := HuffmanOnly; i <= BestCompression; i++ { - testResetOutput(t, fmt.Sprint("level-", i), func(w io.Writer) (*Writer, error) { return NewWriter(w, i) }) - } - dict := []byte(strings.Repeat("we are the world - how are you?", 3)) - for i := HuffmanOnly; i <= BestCompression; i++ { - testResetOutput(t, fmt.Sprint("dict-level-", i), func(w io.Writer) (*Writer, error) { return NewWriterDict(w, i, dict) }) - } - for i := HuffmanOnly; i <= BestCompression; i++ { - testResetOutput(t, fmt.Sprint("dict-reset-level-", i), func(w io.Writer) (*Writer, error) { - w2, err := NewWriter(nil, i) - if err != nil { - return w2, err - } - w2.ResetDict(w, dict) - return w2, nil - }) - } -} - -func testResetOutput(t *testing.T, name string, newWriter func(w io.Writer) (*Writer, error)) { - t.Run(name, func(t *testing.T) { - buf := new(bytes.Buffer) - w, err := newWriter(buf) - if err != nil { - t.Fatalf("NewWriter: %v", err) - } - b := []byte("hello world - how are you doing?") - for i := 0; i < 1024; i++ { - w.Write(b) - } - w.Close() - out1 := buf.Bytes() - - buf2 := new(bytes.Buffer) - w.Reset(buf2) - for i := 0; i < 1024; i++ { - w.Write(b) - } - w.Close() - out2 := buf2.Bytes() - - if len(out1) != len(out2) { - t.Errorf("got %d, expected %d bytes", len(out2), len(out1)) - } - if !bytes.Equal(out1, out2) { - mm := 0 - for i, b := range out1[:len(out2)] { - if b != out2[i] { - t.Errorf("mismatch index %d: %02x, expected %02x", i, out2[i], b) - } - mm++ - if mm == 10 { - t.Fatal("Stopping") - } - } - } - t.Logf("got %d bytes", len(out1)) - }) -} - -// TestBestSpeed tests that round-tripping through deflate and then inflate -// recovers the original input. The Write sizes are near the thresholds in the -// compressor.encSpeed method (0, 16, 128), as well as near maxStoreBlockSize -// (65535). -func TestBestSpeed(t *testing.T) { - abc := make([]byte, 128) - for i := range abc { - abc[i] = byte(i) - } - abcabc := bytes.Repeat(abc, 131072/len(abc)) - var want []byte - - testCases := [][]int{ - {65536, 0}, - {65536, 1}, - {65536, 1, 256}, - {65536, 1, 65536}, - {65536, 14}, - {65536, 15}, - {65536, 16}, - {65536, 16, 256}, - {65536, 16, 65536}, - {65536, 127}, - {65536, 128}, - {65536, 128, 256}, - {65536, 128, 65536}, - {65536, 129}, - {65536, 65536, 256}, - {65536, 65536, 65536}, - } - - for i, tc := range testCases { - if testing.Short() && i > 5 { - t.Skip() - } - for _, firstN := range []int{1, 65534, 65535, 65536, 65537, 131072} { - tc[0] = firstN - outer: - for _, flush := range []bool{false, true} { - buf := new(bytes.Buffer) - want = want[:0] - - w, err := NewWriter(buf, BestSpeed) - if err != nil { - t.Errorf("i=%d, firstN=%d, flush=%t: NewWriter: %v", i, firstN, flush, err) - continue - } - for _, n := range tc { - want = append(want, abcabc[:n]...) 
- if _, err := w.Write(abcabc[:n]); err != nil { - t.Errorf("i=%d, firstN=%d, flush=%t: Write: %v", i, firstN, flush, err) - continue outer - } - if !flush { - continue - } - if err := w.Flush(); err != nil { - t.Errorf("i=%d, firstN=%d, flush=%t: Flush: %v", i, firstN, flush, err) - continue outer - } - } - if err := w.Close(); err != nil { - t.Errorf("i=%d, firstN=%d, flush=%t: Close: %v", i, firstN, flush, err) - continue - } - - r := NewReader(buf) - got, err := io.ReadAll(r) - if err != nil { - t.Errorf("i=%d, firstN=%d, flush=%t: ReadAll: %v", i, firstN, flush, err) - continue - } - r.Close() - - if !bytes.Equal(got, want) { - t.Errorf("i=%d, firstN=%d, flush=%t: corruption during deflate-then-inflate", i, firstN, flush) - continue - } - } - } - } -} diff --git a/vendor/github.com/openebs/lvm-localpv/LICENSE b/vendor/github.com/openebs/lvm-localpv/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/openebs/lvm-localpv/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/doc.go b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/doc.go new file mode 100644 index 000000000000..e020bb42436a --- /dev/null +++ b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register + +// Package v1alpha1 is the API version +// +groupName=local.openebs.io +package v1alpha1 diff --git a/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmnode.go b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmnode.go new file mode 100644 index 000000000000..9d64e9a71234 --- /dev/null +++ b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmnode.go @@ -0,0 +1,125 @@ +/* + Copyright © 2021 The OpenEBS Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=lvmnode + +// LVMNode records information about all lvm volume groups available +// in a node. 
In general, the openebs node-agent creates the LVMNode +// object & periodically synchronizing the volume groups available in the node. +// LVMNode has an owner reference pointing to the corresponding node object. +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Namespaced,shortName=lvmnode +type LVMNode struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + VolumeGroups []VolumeGroup `json:"volumeGroups"` +} + +// VolumeGroup specifies attributes of a given vg exists on node. +type VolumeGroup struct { + // Name of the lvm volume group. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + Name string `json:"name"` + + // UUID denotes a unique identity of a lvm volume group. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + UUID string `json:"uuid"` + + // Size specifies the total size of volume group. + // +kubebuilder:validation:Required + Size resource.Quantity `json:"size"` + // Free specifies the available capacity of volume group. + // +kubebuilder:validation:Required + Free resource.Quantity `json:"free"` + + // LVCount denotes total number of logical volumes in + // volume group. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=0 + LVCount int32 `json:"lvCount"` + // PVCount denotes total number of physical volumes + // constituting the volume group. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=0 + PVCount int32 `json:"pvCount"` + + // MaxLV denotes maximum number of logical volumes allowed + // in volume group or 0 if unlimited. + MaxLV int32 `json:"maxLv"` + + // MaxPV denotes maximum number of physical volumes allowed + // in volume group or 0 if unlimited. + MaxPV int32 `json:"maxPv"` + + // SnapCount denotes number of snapshots in volume group. + SnapCount int32 `json:"snapCount"` + + // MissingPVCount denotes number of physical volumes in + // volume group which are missing. + MissingPVCount int32 `json:"missingPvCount"` + + // MetadataCount denotes number of metadata areas on the + // volume group. + MetadataCount int32 `json:"metadataCount"` + + // MetadataUsedCount denotes number of used metadata areas in + // volume group + MetadataUsedCount int32 `json:"metadataUsedCount"` + + // MetadataFree specifies the available metadata area space + // for the volume group + MetadataFree resource.Quantity `json:"metadataFree"` + + // MetadataSize specifies size of smallest metadata area + // for the volume group + MetadataSize resource.Quantity `json:"metadataSize"` + + // Permission indicates the volume group permission + // which can be writable or read-only. + // Permission has the following mapping between + // int and string for its value: + // [-1: "", 0: "writeable", 1: "read-only"] + Permission int `json:"permissions"` + + // AllocationPolicy indicates the volume group allocation + // policy. 
+ // AllocationPolicy has the following mapping between + // int and string for its value: + // [-1: "", 0: "normal", 1: "contiguous", 2: "cling", 3: "anywhere", 4: "inherited"] + AllocationPolicy int `json:"allocationPolicy"` +} + +// LVMNodeList is a collection of LVMNode resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=lvmnodes +type LVMNodeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []LVMNode `json:"items"` +} diff --git a/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmsnapshot.go b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmsnapshot.go new file mode 100644 index 000000000000..16bae5b04916 --- /dev/null +++ b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmsnapshot.go @@ -0,0 +1,68 @@ +/* +Copyright 2021 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=lvmsnapshot + +// LVMSnapshot represents an LVM Snapshot of the lvm volume +type LVMSnapshot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LVMSnapshotSpec `json:"spec"` + Status SnapStatus `json:"status"` +} + +// LVMSnapshotSpec defines LVMSnapshot spec +type LVMSnapshotSpec struct { + // OwnerNodeID is the Node ID where the volume group is present which is where + // the snapshot has been provisioned. + // OwnerNodeID can not be edited after the snapshot has been provisioned. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + OwnerNodeID string `json:"ownerNodeID"` + + // VolGroup specifies the name of the volume group where the snapshot has been created. 
+ // +kubebuilder:validation:Required + VolGroup string `json:"volGroup"` + + // SnapSize specifies the space reserved for the snapshot + // +kubebuilder:validation:Required + SnapSize string `json:"snapSize,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=lvmsnapshots + +// LVMSnapshotList is a list of LVMSnapshot resources +type LVMSnapshotList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []LVMSnapshot `json:"items"` +} + +// SnapStatus string that reflects if the snapshot was created successfully +type SnapStatus struct { + State string `json:"state,omitempty"` +} diff --git a/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmvolume.go b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmvolume.go new file mode 100644 index 000000000000..d8d098233986 --- /dev/null +++ b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/lvmvolume.go @@ -0,0 +1,123 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=lvmvolume + +// LVMVolume represents a LVM based volume +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Namespaced,shortName=lvmvol +// +kubebuilder:printcolumn:name="VolGroup",type=string,JSONPath=`.spec.volGroup`,description="volume group where the volume is created" +// +kubebuilder:printcolumn:name="Node",type=string,JSONPath=`.spec.ownerNodeID`,description="Node where the volume is created" +// +kubebuilder:printcolumn:name="Size",type=string,JSONPath=`.spec.capacity`,description="Size of the volume" +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.state`,description="Status of the volume" +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="Age of the volume" +type LVMVolume struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec VolumeInfo `json:"spec"` + Status VolStatus `json:"status,omitempty"` +} + +// LVMVolumeList is a list of LVMVolume resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=lvmvolumes +type LVMVolumeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []LVMVolume `json:"items"` +} + +// VolumeInfo defines LVM info +type VolumeInfo struct { + + // OwnerNodeID is the Node ID where the volume group is present which is where + // the volume has been provisioned. + // OwnerNodeID can not be edited after the volume has been provisioned. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + OwnerNodeID string `json:"ownerNodeID"` + + // VolGroup specifies the name of the volume group where the volume has been created. 
+ // +kubebuilder:validation:Required + VolGroup string `json:"volGroup"` + + // VgPattern specifies the regex to choose volume groups where volume + // needs to be created. + // +kubebuilder:validation:Required + VgPattern string `json:"vgPattern"` + + // Capacity of the volume + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + Capacity string `json:"capacity"` + + // Shared specifies whether the volume can be shared among multiple pods. + // If it is not set to "yes", then the LVM LocalPV Driver will not allow + // the volumes to be mounted by more than one pods. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=yes;no + Shared string `json:"shared,omitempty"` + + // ThinProvision specifies whether logical volumes can be thinly provisioned. + // If it is set to "yes", then the LVM LocalPV Driver will create + // thinProvision i.e. logical volumes that are larger than the available extents. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=yes;no + ThinProvision string `json:"thinProvision,omitempty"` +} + +// VolStatus string that specifies the current state of the volume provisioning request. +type VolStatus struct { + // State specifies the current state of the volume provisioning request. + // The state "Pending" means that the volume creation request has not + // processed yet. The state "Ready" means that the volume has been created + // and it is ready for the use. "Failed" means that volume provisioning + // has been failed and will not be retried by node agent controller. + // +kubebuilder:validation:Enum=Pending;Ready;Failed + State string `json:"state,omitempty"` + + // Error denotes the error occurred during provisioning/expanding a volume. + // Error field should only be set when State becomes Failed. + Error *VolumeError `json:"error,omitempty"` +} + +// VolumeError specifies the error occurred during volume provisioning. +type VolumeError struct { + Code VolumeErrorCode `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +// VolumeErrorCode represents the error code to represent +// specific class of errors. +type VolumeErrorCode string + +const ( + // Internal represents system internal error. + Internal VolumeErrorCode = "Internal" + // InsufficientCapacity represent lvm vg doesn't + // have enough capacity to fit the lv request. + InsufficientCapacity VolumeErrorCode = "InsufficientCapacity" +) diff --git a/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/register.go b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/register.go new file mode 100644 index 000000000000..907a65d584f4 --- /dev/null +++ b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/register.go @@ -0,0 +1,82 @@ +/* +Copyright © 2019 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used +// to register custom resources +// +// NOTE: +// +// This variable name should not be changed +var SchemeGroupVersion = schema.GroupVersion{ + Group: "local.openebs.io", + Version: "v1alpha1", +} + +// Resource takes an unqualified resource and +// returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion. + WithResource(resource). + GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder + // with scheme init functions to run + // for this API package + SchemeBuilder runtime.SchemeBuilder + + localSchemeBuilder = &SchemeBuilder + + // AddToScheme is a global function that + // registers this API group & version to + // a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions + // here. This registration of generated functions + // takes place in the generated files. + // + // NOTE: + // This separation makes the code compile even + // when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes( + SchemeGroupVersion, + &LVMVolume{}, + &LVMVolumeList{}, + &LVMSnapshot{}, + &LVMSnapshotList{}, + &LVMNode{}, + &LVMNodeList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..dcfecf444a10 --- /dev/null +++ b/vendor/github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,319 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021 The OpenEBS Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LVMNode) DeepCopyInto(out *LVMNode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.VolumeGroups != nil { + in, out := &in.VolumeGroups, &out.VolumeGroups + *out = make([]VolumeGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LVMNode. 
+func (in *LVMNode) DeepCopy() *LVMNode { + if in == nil { + return nil + } + out := new(LVMNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LVMNode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LVMNodeList) DeepCopyInto(out *LVMNodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LVMNode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LVMNodeList. +func (in *LVMNodeList) DeepCopy() *LVMNodeList { + if in == nil { + return nil + } + out := new(LVMNodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LVMNodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LVMSnapshot) DeepCopyInto(out *LVMSnapshot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LVMSnapshot. +func (in *LVMSnapshot) DeepCopy() *LVMSnapshot { + if in == nil { + return nil + } + out := new(LVMSnapshot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LVMSnapshot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LVMSnapshotList) DeepCopyInto(out *LVMSnapshotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LVMSnapshot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LVMSnapshotList. +func (in *LVMSnapshotList) DeepCopy() *LVMSnapshotList { + if in == nil { + return nil + } + out := new(LVMSnapshotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LVMSnapshotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LVMSnapshotSpec) DeepCopyInto(out *LVMSnapshotSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LVMSnapshotSpec. 
+func (in *LVMSnapshotSpec) DeepCopy() *LVMSnapshotSpec { + if in == nil { + return nil + } + out := new(LVMSnapshotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LVMVolume) DeepCopyInto(out *LVMVolume) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LVMVolume. +func (in *LVMVolume) DeepCopy() *LVMVolume { + if in == nil { + return nil + } + out := new(LVMVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LVMVolume) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LVMVolumeList) DeepCopyInto(out *LVMVolumeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LVMVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LVMVolumeList. +func (in *LVMVolumeList) DeepCopy() *LVMVolumeList { + if in == nil { + return nil + } + out := new(LVMVolumeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LVMVolumeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapStatus) DeepCopyInto(out *SnapStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapStatus. +func (in *SnapStatus) DeepCopy() *SnapStatus { + if in == nil { + return nil + } + out := new(SnapStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolStatus) DeepCopyInto(out *VolStatus) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(VolumeError) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolStatus. +func (in *VolStatus) DeepCopy() *VolStatus { + if in == nil { + return nil + } + out := new(VolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeGroup) DeepCopyInto(out *VolumeGroup) { + *out = *in + out.Size = in.Size.DeepCopy() + out.Free = in.Free.DeepCopy() + out.MetadataFree = in.MetadataFree.DeepCopy() + out.MetadataSize = in.MetadataSize.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroup. +func (in *VolumeGroup) DeepCopy() *VolumeGroup { + if in == nil { + return nil + } + out := new(VolumeGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeInfo) DeepCopyInto(out *VolumeInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeInfo. +func (in *VolumeInfo) DeepCopy() *VolumeInfo { + if in == nil { + return nil + } + out := new(VolumeInfo) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 35904ea19861..2b5bca4b999a 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,25 +1,38 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.20.3 // source: io/prometheus/client/metrics.proto package io_prometheus_client import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type MetricType int32 @@ -38,23 +51,25 @@ const ( MetricType_GAUGE_HISTOGRAM MetricType = 5 ) -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", - 5: "GAUGE_HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, - "GAUGE_HISTOGRAM": 5, -} +// Enum value maps for MetricType. 
+var ( + MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", + } + MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, + } +) func (x MetricType) Enum() *MetricType { p := new(MetricType) @@ -63,449 +78,519 @@ func (x MetricType) Enum() *MetricType { } func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") +func (MetricType) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor() +} + +func (MetricType) Type() protoreflect.EnumType { + return &file_io_prometheus_client_metrics_proto_enumTypes[0] +} + +func (x MetricType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MetricType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = MetricType(value) + *x = MetricType(num) return nil } +// Deprecated: Use MetricType.Descriptor instead. func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} } type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) +func (x *LabelPair) Reset() { + *x = LabelPair{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) + +func (x *LabelPair) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) + +func (*LabelPair) ProtoMessage() {} + +func (x *LabelPair) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -var xxx_messageInfo_LabelPair proto.InternalMessageInfo +// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead. +func (*LabelPair) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} +} -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *LabelPair) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value +func (x *LabelPair) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value } return "" } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{1} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) +func (x *Gauge) Reset() { + *x = Gauge{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) + +func (x *Gauge) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) + +func (*Gauge) ProtoMessage() {} + +func (x *Gauge) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Gauge proto.InternalMessageInfo +// Deprecated: Use Gauge.ProtoReflect.Descriptor instead. 
+func (*Gauge) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1} +} -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Gauge) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{2} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` } -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) +func (x *Counter) Reset() { + *x = Counter{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) + +func (x *Counter) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) + +func (*Counter) ProtoMessage() {} + +func (x *Counter) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Counter proto.InternalMessageInfo +// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
+func (*Counter) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2} +} -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Counter) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Counter) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{3} + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` } -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(m, src) +func (x *Quantile) Reset() { + *x = Quantile{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) + +func (x *Quantile) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) + +func (*Quantile) ProtoMessage() {} + +func (x *Quantile) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Quantile proto.InternalMessageInfo +// Deprecated: Use Quantile.ProtoReflect.Descriptor instead. 
+func (*Quantile) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3} +} -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile +func (x *Quantile) GetQuantile() float64 { + if x != nil && x.Quantile != nil { + return *x.Quantile } return 0 } -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Quantile) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{4} + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` } -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) +func (x *Summary) Reset() { + *x = Summary{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) + +func (x *Summary) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) + +func (*Summary) ProtoMessage() {} + +func (x *Summary) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Summary proto.InternalMessageInfo +// Deprecated: Use Summary.ProtoReflect.Descriptor instead. 
+func (*Summary) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4} +} -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Summary) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Summary) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile +func (x *Summary) GetQuantile() []*Quantile { + if x != nil { + return x.Quantile } return nil } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{5} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) +func (x *Untyped) Reset() { + *x = Untyped{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) + +func (x *Untyped) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) + +func (*Untyped) ProtoMessage() {} + +func (x *Untyped) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Untyped proto.InternalMessageInfo +// Deprecated: Use Untyped.ProtoReflect.Descriptor instead. 
+func (*Untyped) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5} +} -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Untyped) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Histogram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). // In the future, more bucket schemas may be added using numbers < -4 or > 8. Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` - ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` - ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` - ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket. + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0. // Negative buckets for the native histogram. NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` // Use either "negative_delta" or "negative_count", the former for // regular histograms with integer counts, the latter for float // histograms. - NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` - NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. 
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float // histograms. - PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` - PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. } -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{6} +func (x *Histogram) Reset() { + *x = Histogram{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) +func (x *Histogram) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) + +func (*Histogram) ProtoMessage() {} + +func (x *Histogram) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Histogram proto.InternalMessageInfo +// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. 
+func (*Histogram) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6} +} -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Histogram) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Histogram) GetSampleCountFloat() float64 { - if m != nil && m.SampleCountFloat != nil { - return *m.SampleCountFloat +func (x *Histogram) GetSampleCountFloat() float64 { + if x != nil && x.SampleCountFloat != nil { + return *x.SampleCountFloat } return 0 } -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Histogram) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket +func (x *Histogram) GetBucket() []*Bucket { + if x != nil { + return x.Bucket } return nil } -func (m *Histogram) GetSchema() int32 { - if m != nil && m.Schema != nil { - return *m.Schema +func (x *Histogram) GetSchema() int32 { + if x != nil && x.Schema != nil { + return *x.Schema } return 0 } -func (m *Histogram) GetZeroThreshold() float64 { - if m != nil && m.ZeroThreshold != nil { - return *m.ZeroThreshold +func (x *Histogram) GetZeroThreshold() float64 { + if x != nil && x.ZeroThreshold != nil { + return *x.ZeroThreshold } return 0 } -func (m *Histogram) GetZeroCount() uint64 { - if m != nil && m.ZeroCount != nil { - return *m.ZeroCount +func (x *Histogram) GetZeroCount() uint64 { + if x != nil && x.ZeroCount != nil { + return *x.ZeroCount } return 0 } -func (m *Histogram) GetZeroCountFloat() float64 { - if m != nil && m.ZeroCountFloat != nil { - return *m.ZeroCountFloat +func (x *Histogram) GetZeroCountFloat() float64 { + if x != nil && x.ZeroCountFloat != nil { + return *x.ZeroCountFloat } return 0 } -func (m *Histogram) GetNegativeSpan() []*BucketSpan { - if m != nil { - return m.NegativeSpan +func (x *Histogram) GetNegativeSpan() []*BucketSpan { + if x != nil { + return x.NegativeSpan } return nil } -func (m *Histogram) GetNegativeDelta() []int64 { - if m != nil { - return m.NegativeDelta +func (x *Histogram) GetNegativeDelta() []int64 { + if x != nil { + return x.NegativeDelta } return nil } -func (m *Histogram) GetNegativeCount() []float64 { - if m != nil { - return m.NegativeCount +func (x *Histogram) GetNegativeCount() []float64 { + if x != nil { + return x.NegativeCount } return nil } -func (m *Histogram) GetPositiveSpan() []*BucketSpan { - if m != nil { - return m.PositiveSpan +func (x *Histogram) GetPositiveSpan() []*BucketSpan { + if x != nil { + return x.PositiveSpan } return nil } -func (m *Histogram) GetPositiveDelta() []int64 { - if m != nil { - return m.PositiveDelta +func (x *Histogram) GetPositiveDelta() []int64 { + if x != nil { + return x.PositiveDelta } return nil } -func (m *Histogram) GetPositiveCount() []float64 { - if m != nil { - return m.PositiveCount +func (x *Histogram) GetPositiveCount() []float64 { + if x != nil { + return x.PositiveCount } return nil } @@ -513,64 +598,72 @@ func (m *Histogram) GetPositiveCount() []float64 { // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. 
type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order. + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0. + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive. Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{7} +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Bucket proto.InternalMessageInfo +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. 
+func (*Bucket) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7} +} -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount +func (x *Bucket) GetCumulativeCount() uint64 { + if x != nil && x.CumulativeCount != nil { + return *x.CumulativeCount } return 0 } -func (m *Bucket) GetCumulativeCountFloat() float64 { - if m != nil && m.CumulativeCountFloat != nil { - return *m.CumulativeCountFloat +func (x *Bucket) GetCumulativeCountFloat() float64 { + if x != nil && x.CumulativeCountFloat != nil { + return *x.CumulativeCountFloat } return 0 } -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound +func (x *Bucket) GetUpperBound() float64 { + if x != nil && x.UpperBound != nil { + return *x.UpperBound } return 0 } -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Bucket) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } @@ -582,333 +675,658 @@ func (m *Bucket) GetExemplar() *Exemplar { // structured here (with all the buckets in a single array separate // from the Spans). type BucketSpan struct { - Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` - Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *BucketSpan) Reset() { *m = BucketSpan{} } -func (m *BucketSpan) String() string { return proto.CompactTextString(m) } -func (*BucketSpan) ProtoMessage() {} -func (*BucketSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{8} + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets. } -func (m *BucketSpan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BucketSpan.Unmarshal(m, b) -} -func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) -} -func (m *BucketSpan) XXX_Merge(src proto.Message) { - xxx_messageInfo_BucketSpan.Merge(m, src) +func (x *BucketSpan) Reset() { + *x = BucketSpan{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BucketSpan) XXX_Size() int { - return xxx_messageInfo_BucketSpan.Size(m) + +func (x *BucketSpan) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BucketSpan) XXX_DiscardUnknown() { - xxx_messageInfo_BucketSpan.DiscardUnknown(m) + +func (*BucketSpan) ProtoMessage() {} + +func (x *BucketSpan) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BucketSpan proto.InternalMessageInfo +// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. 
+func (*BucketSpan) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8} +} -func (m *BucketSpan) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset +func (x *BucketSpan) GetOffset() int32 { + if x != nil && x.Offset != nil { + return *x.Offset } return 0 } -func (m *BucketSpan) GetLength() uint32 { - if m != nil && m.Length != nil { - return *m.Length +func (x *BucketSpan) GetLength() uint32 { + if x != nil && x.Length != nil { + return *x.Length } return 0 } type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{9} + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style. } -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Exemplar proto.InternalMessageInfo +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. 
+func (*Exemplar) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9} +} -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Exemplar) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Exemplar) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{10} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -var xxx_messageInfo_Metric proto.InternalMessageInfo +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10} +} -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Metric) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge +func (x *Metric) GetGauge() *Gauge { + if x != nil { + return x.Gauge } return nil } -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter +func (x *Metric) GetCounter() *Counter { + if x != nil { + return x.Counter } return nil } -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary +func (x *Metric) GetSummary() *Summary { + if x != nil { + return x.Summary } return nil } -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped +func (x *Metric) GetUntyped() *Untyped { + if x != nil { + return x.Untyped } return nil } -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram +func (x *Metric) GetHistogram() *Histogram { + if x != nil { + return x.Histogram } return nil } -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs +func (x *Metric) GetTimestampMs() int64 { + if x != nil && x.TimestampMs != nil { + return *x.TimestampMs } return 0 } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{11} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` +} + +func (x *MetricFamily) Reset() { + *x = MetricFamily{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) +func (x *MetricFamily) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MetricFamily) XXX_DiscardUnknown() { - 
xxx_messageInfo_MetricFamily.DiscardUnknown(m) + +func (*MetricFamily) ProtoMessage() {} + +func (x *MetricFamily) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo +// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead. +func (*MetricFamily) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11} +} -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *MetricFamily) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help +func (x *MetricFamily) GetHelp() string { + if x != nil && x.Help != nil { + return *x.Help } return "" } -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type +func (x *MetricFamily) GetType() MetricType { + if x != nil && x.Type != nil { + return *x.Type } return MetricType_COUNTER } -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric +func (x *MetricFamily) GetMetric() []*Metric { + if x != nil { + return x.Metric } return nil } -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") -} - -func init() { - proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) -} - -var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 896 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, - 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, - 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, - 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, - 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, - 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, - 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, - 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, - 0xe0, 
0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, - 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, - 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, - 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, - 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, - 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, - 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, - 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, - 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, - 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, - 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, - 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, - 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, - 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, - 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, - 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, - 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, - 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, - 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, - 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, - 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, - 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, - 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, - 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, - 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, - 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, - 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, - 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, - 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, - 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, - 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, - 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, - 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, - 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, - 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, - 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, - 0x0a, 0x95, 0x48, 0x2d, 0x3c, 
0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, - 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, - 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, - 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27, - 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, - 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, - 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, - 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, - 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, - 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, - 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, +var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor + +var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, + 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a, + 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x61, 0x6d, 
0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, + 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, + 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, + 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 
0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, + 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, + 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, + 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, + 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, + 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, + 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, + 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, + 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 
0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, + 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, + 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, + 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, + 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, + 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, + 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, + 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 
0x52, 0x59, + 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, + 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, + 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, + 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, +} + +var ( + file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once + file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc +) + +func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte { + file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() { + file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData) + }) + return file_io_prometheus_client_metrics_proto_rawDescData +} + +var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ + (MetricType)(0), // 0: io.prometheus.client.MetricType + (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair + (*Gauge)(nil), // 2: io.prometheus.client.Gauge + (*Counter)(nil), // 3: io.prometheus.client.Counter + (*Quantile)(nil), // 4: io.prometheus.client.Quantile + (*Summary)(nil), // 5: io.prometheus.client.Summary + (*Untyped)(nil), // 6: io.prometheus.client.Untyped + (*Histogram)(nil), // 7: io.prometheus.client.Histogram + (*Bucket)(nil), // 8: io.prometheus.client.Bucket + (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan + (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar + (*Metric)(nil), // 11: io.prometheus.client.Metric + (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp +} +var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ + 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar + 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 11: io.prometheus.client.Metric.summary:type_name -> 
io.prometheus.client.Summary + 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_io_prometheus_client_metrics_proto_init() } +func file_io_prometheus_client_metrics_proto_init() { + if File_io_prometheus_client_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Gauge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Counter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Quantile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Summary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Untyped); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricFamily); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc, + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_io_prometheus_client_metrics_proto_goTypes, + DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs, + EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes, + MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes, + }.Build() + File_io_prometheus_client_metrics_proto = out.File + file_io_prometheus_client_metrics_proto_rawDesc = nil + file_io_prometheus_client_metrics_proto_goTypes = nil + file_io_prometheus_client_metrics_proto_depIdxs = nil } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index f4fc88455221..906397815138 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -132,7 +132,10 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error { } // Pick off one MetricFamily per Decode until there's nothing left. for key, fam := range d.fams { - *v = *fam + v.Name = fam.Name + v.Help = fam.Help + v.Type = fam.Type + v.Metric = fam.Metric delete(d.fams, key) return nil } diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 64dc0eb40c28..7f611ffaad7c 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,9 +18,9 @@ import ( "io" "net/http" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
"github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/prototext" dto "github.com/prometheus/client_model/go" ) @@ -99,8 +99,11 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { return FmtText } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { - return FmtOpenMetrics + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { + if ver == OpenMetricsVersion_1_0_0 { + return FmtOpenMetrics_1_0_0 + } + return FmtOpenMetrics_0_0_1 } } return FmtText @@ -133,7 +136,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { case FmtProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + _, err := fmt.Fprintln(w, prototext.Format(v)) return err }, close: func() error { return nil }, @@ -146,7 +149,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { }, close: func() error { return nil }, } - case FmtOpenMetrics: + case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: return encoderCloser{ encode: func(v *dto.MetricFamily) error { _, err := MetricFamilyToOpenMetrics(w, v) diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 0f176fa64f25..c4cb20f0d3ef 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -19,20 +19,22 @@ type Format string // Constants to assemble the Content-Type values for the different wire protocols. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - OpenMetricsVersion = "0.0.1" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion_0_0_1 = "0.0.1" + OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. 
- FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index ac2482782c7b..35db1cc9d73c 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -24,8 +24,8 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) // A stateFn is a function that represents a state in a state machine. By diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index cf66309c4a8b..db1c95fab1d5 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -3,29 +3,31 @@ // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries +// cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. +// name [context], and migrating to it can be done automatically with [go fix]. // -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// Incoming requests to a server should create a [Context], and outgoing +// calls to servers should accept a Context. The chain of function +// calls between them must propagate the Context, optionally replacing +// it with a derived Context created using [WithCancel], [WithDeadline], +// [WithTimeout], or [WithValue]. // // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. This is discussed further in +// https://go.dev/blog/context-and-structs. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... 
// } // -// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -34,9 +36,30 @@ // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // -// See http://blog.golang.org/context for example code for a server that uses +// See https://go.dev/blog/context for example code for a server that uses // Contexts. -package context // import "golang.org/x/net/context" +// +// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// Canceled is the error returned by [Context.Err] when the context is canceled +// for some reason other than its deadline passing. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled +// due to its deadline passing. +var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, @@ -49,8 +72,73 @@ func Background() Context { // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. +// parameter). func TODO() Context { return todo } + +var ( + background = context.Background() + todo = context.TODO() +) + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// A CancelFunc may be called by multiple goroutines simultaneously. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc + +// WithCancel returns a derived context that points to the parent context +// but has a new Done channel. The returned context's Done channel is closed +// when the returned cancel function is called or when the parent context's +// Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + return context.WithCancel(parent) +} + +// WithDeadline returns a derived context that points to the parent context +// but has the deadline adjusted to be no later than d. If the parent's +// deadline is already earlier than d, WithDeadline(parent, d) is semantically +// equivalent to parent. The returned [Context.Done] channel is closed when +// the deadline expires, when the returned cancel function is called, +// or when the parent context's Done channel is closed, whichever happens first. 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + return context.WithDeadline(parent, d) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return context.WithTimeout(parent, timeout) +} + +// WithValue returns a derived context that points to the parent Context. +// In the derived context, the value associated with key is val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b8679376a..000000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. 
The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a9045b..000000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa525..000000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. 
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. 
- - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a63803358..000000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. 
- // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index de58dfb8dc49..ca645d9a1aff 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { return conf } -// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go index e3784123c81a..5b516c55fffd 100644 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { fillNetHTTPConfig(conf, srv.HTTP2) } -// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. 
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { fillNetHTTPConfig(conf, tr.HTTP2) } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 81faec7e75d6..97bd8b06f7ac 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fh.Length > fr.maxReadSize { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index c7601c909ffb..6c18ea230be0 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -34,11 +34,19 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool - disableExtendedConnectProtocol bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + + // Enabling extended CONNECT by causes browsers to attempt to use + // WebSockets-over-HTTP/2. This results in problems when the server's websocket + // package doesn't support extended CONNECT. + // + // Disable extended CONNECT by default for now. + // + // Issue #71128. + disableExtendedConnectProtocol = true ) func init() { @@ -51,8 +59,8 @@ func init() { logFrameWrites = true logFrameReads = true } - if strings.Contains(e, "http2xconnect=0") { - disableExtendedConnectProtocol = true + if strings.Contains(e, "http2xconnect=1") { + disableExtendedConnectProtocol = false } } @@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) { s.v = save } -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// - a non-empty string starting with '/' -// - the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} - // incomparable is a zero-width, non-comparable type. Adding it to a struct // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). 
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index b55547aec640..51fca38f61d7 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -50,6 +50,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048 func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() - buildCommonHeaderMapsOnce() - cv, ok := commonCanonHeader[v] + cv, ok := httpcommon.CachedCanonicalHeader(v) if ok { return cv } @@ -1068,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) { func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { if sc.pingSent { - sc.vlogf("timeout waiting for PING response") + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } sc.conn.Close() return } @@ -2233,25 +2236,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - protocol: f.PseudoValue("protocol"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), } // extended connect is disabled, so we should not see :protocol - if disableExtendedConnectProtocol && rp.protocol != "" { + if disableExtendedConnectProtocol && rp.Protocol != "" { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - isConnect := rp.method == "CONNECT" + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -2265,15 +2268,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") } - if rp.protocol != "" { - rp.header.Set(":protocol", rp.protocol) + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) @@ -2282,7 +2286,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := 
rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2298,84 +2302,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - protocol string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == "https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" && rp.protocol == "" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -3270,12 +3228,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. 
promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 090d0e1bdb5d..f26356b9cd91 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "sort" "strconv" "strings" "sync" @@ -35,6 +34,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -375,6 +375,7 @@ type ClientConn struct { doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool + closedOnIdle bool // true if conn was closed for idleness seenSettings bool // true if we've seen a settings frame, false otherwise seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back @@ -1089,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) // // This avoids a situation where an error early in a connection's lifetime // goes unreported. - if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { st.canTakeNewRequest = true } @@ -1155,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -1271,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() { // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var errRequestCanceled = errors.New("net/http: request canceled") -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = canonicalHeader(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", fmt.Errorf("invalid Trailer key %q", k) - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - func (cc *ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout @@ -1299,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. 
-// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - // actualContentLength returns a sanitized version of // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. @@ -1360,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true - } + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1492,10 +1445,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre cc := cs.cc ctx := cs.ctx - if err := checkConnHeaders(req); err != nil { - return err - } - // wait for setting frames to be received, a server can change this value later, // but we just wait for the first settings frame var isExtendedConnect bool @@ -1659,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return err - } - hasTrailers := trailers != "" - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + cc.hbuf.Reset() + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { + cc.writeHeader(name, value) + }) if err != nil { - return err + return fmt.Errorf("http2: %w", err) } + hdrs := cc.hbuf.Bytes() // Write the request. 
- endStream := !hasBody && !hasTrailers + endStream := !res.HasBody && !res.HasTrailers cs.sentHeaders = true err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) traceWroteHeaders(cs.trace) return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // cleanupWriteRequest performs post-request tasks. // // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -2066,218 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -func validateHeaders(hdrs http.Header) string { - for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { - return fmt.Sprintf("name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, - // because it may be sensitive. - return fmt.Sprintf("value for header %q", k) - } - } - } - return "" -} - -var errNilRequestURL = errors.New("http2: Request.URI is nil") - -func isNormalConnect(req *http.Request) bool { - return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" -} - -// requires cc.wmu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - if req.URL == nil { - return nil, errNilRequestURL - } - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - if !httpguts.ValidHostHeader(host) { - return nil, errors.New("http2: invalid Host header") - } - - var path string - if !isNormalConnect(req) { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers+trailers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - if err := validateHeaders(req.Header); err != "" { - return nil, fmt.Errorf("invalid HTTP header %s", err) - } - if err := validateHeaders(req.Trailer); err != "" { - return nil, fmt.Errorf("invalid HTTP trailer %s", err) - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production, see Sections 3.3 and 3.4 of - // [RFC3986]). 
- f(":authority", host) - m := req.Method - if m == "" { - m = http.MethodGet - } - f(":method", m) - if !isNormalConnect(req) { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if asciiEqualFold(k, "connection") || - asciiEqualFold(k, "proxy-connection") || - asciiEqualFold(k, "transfer-encoding") || - asciiEqualFold(k, "upgrade") || - asciiEqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if asciiEqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } else if asciiEqualFold(k, "cookie") { - // Per 8.1.2.5 To allow for better compression efficiency, the - // Cookie header field MAY be split into separate header fields, - // each with one or more cookie-pairs. - for _, v := range vv { - for { - p := strings.IndexByte(v, ';') - if p < 0 { - break - } - f("cookie", v[:p]) - p++ - // strip space after semicolon if any. - for p+1 <= len(v) && v[p] == ' ' { - p++ - } - v = v[p:] - } - if len(v) > 0 { - f("cookie", v) - } - } - continue - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - trace := httptrace.ContextClientTrace(req.Context()) - traceHeaders := traceHasWroteHeaderField(trace) - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - name, ascii := lowerHeader(name) - if !ascii { - // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header - // field names have to be ASCII characters (just as in HTTP/1.x). - return - } - cc.writeHeader(name, value) - if traceHeaders { - traceWroteHeaderField(trace, name, value) - } - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. 
- // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - // requires cc.wmu be held. func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { cc.hbuf.Reset() @@ -2294,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := lowerHeader(k) + lowKey, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -2434,9 +2184,12 @@ func (rl *clientConnReadLoop) cleanup() { // This avoids a situation where new connections are constantly created, // added to the pool, fail, and are removed from the pool, without any error // being surfaced to the user. - const unusedWaitTime = 5 * time.Second + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } idleTime := cc.t.now().Sub(cc.lastActive) - if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) @@ -2457,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() { } cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. + cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2549,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } - if !cc.seenSettings { - close(cc.seenSettingsChan) - } return err } } @@ -2646,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2654,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[canonicalHeader(v)] = nil + t[httpcommon.CanonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2778,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -3324,7 +3081,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -3508,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { } } -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return 
trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 6ff6bee7e954..fdb35b9477eb 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -13,6 +13,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) // writeFramer is implemented by any type that is used to write frames. @@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k, ascii := lowerHeader(k) + k, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go new file mode 100644 index 000000000000..ed14da5afccc --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. +func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go similarity index 74% rename from vendor/golang.org/x/net/http2/headermap.go rename to vendor/golang.org/x/net/internal/httpcommon/headermap.go index 149b3dd20e45..92483d8e41a4 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -1,11 +1,11 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package http2 +package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,13 +82,15 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } } -func lowerHeader(v string) (lower string, ascii bool) { +// LowerHeader returns the lowercase form of a header name, +// used on the wire for HTTP/2 and HTTP/3 requests. +func LowerHeader(v string) (lower string, ascii bool) { buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { return s, true @@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) { return asciiToLower(v) } -func canonicalHeader(v string) string { +// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".) +func CanonicalHeader(v string) string { buildCommonHeaderMapsOnce() if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) +} + +// CachedCanonicalHeader returns the canonical form of a well-known header name. +func CachedCanonicalHeader(v string) (string, bool) { + buildCommonHeaderMapsOnce() + s, ok := commonCanonHeader[v] + return s, ok } diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go new file mode 100644 index 000000000000..4b705531793c --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -0,0 +1,467 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import ( + "context" + "errors" + "fmt" + "net/http/httptrace" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +var ( + ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") +) + +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. +type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + +// EncodeHeadersParam is parameters to EncodeHeaders. +type EncodeHeadersParam struct { + Request Request + + // AddGzipHeader indicates that an "accept-encoding: gzip" header should be + // added to the request. + AddGzipHeader bool + + // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting. + PeerMaxHeaderListSize uint64 + + // DefaultUserAgent is the User-Agent header to send when the request + // neither contains a User-Agent nor disables it. + DefaultUserAgent string +} + +// EncodeHeadersParam is the result of EncodeHeaders. +type EncodeHeadersResult struct { + HasBody bool + HasTrailers bool +} + +// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3. +// It validates a request and calls headerf with each pseudo-header and header +// for the request. +// The headerf function is called with the validated, canonicalized header name. 
+func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { + req := param.Request + + // Check for invalid connection-level headers. + if err := checkConnHeaders(req.Header); err != nil { + return res, err + } + + if req.URL == nil { + return res, errors.New("Request.URL is nil") + } + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return res, err + } + if !httpguts.ValidHostHeader(host) { + return res, errors.New("invalid Host header") + } + + // isNormalConnect is true if this is a non-extended CONNECT request. + isNormalConnect := false + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } + if req.Method == "CONNECT" && protocol == "" { + isNormalConnect = true + } else if protocol != "" && req.Method != "CONNECT" { + return res, errors.New("invalid :protocol header in non-CONNECT request") + } + + // Validate the path, except for non-extended CONNECT requests which have no path. + var path string + if !isNormalConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return res, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + if err := validateHeaders(req.Header); err != "" { + return res, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return res, fmt.Errorf("invalid HTTP trailer %s", err) + } + + trailers, err := commaSeparatedTrailers(req.Trailer) + if err != nil { + return res, err + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production, see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + m := req.Method + if m == "" { + m = "GET" + } + f(":method", m) + if !isNormalConnect { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if protocol != "" { + f(":protocol", protocol) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if asciiEqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). 
+ didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if asciiEqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue + } else if k == ":protocol" { + // :protocol pseudo-header was already sent above. + continue + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) + } + if param.AddGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", param.DefaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + if param.PeerMaxHeaderListSize > 0 { + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > param.PeerMaxHeaderListSize { + return res, ErrRequestHeaderListSize + } + } + + trace := httptrace.ContextClientTrace(ctx) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name, ascii := LowerHeader(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + return + } + + headerf(name, value) + + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(name, []string{value}) + } + }) + + res.HasBody = req.ActualContentLength != 0 + res.HasTrailers = trailers != "" + return res, nil +} + +// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header +// for a request. +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !disableCompression && + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + return true + } + return false +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// +// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3 +// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1 +// +// Certain headers are special-cased as okay but not transmitted later. +// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. 
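// --------------------------------------------------------------------------
// Illustrative call-shape sketch (not part of the vendored file). The
// transport.go hunk earlier in this patch is the intended caller of
// EncodeHeaders: the HTTP/2 client fills an httpcommon.Request from an
// *http.Request and passes a callback that HPACK-encodes each validated,
// lowercased header pair. httpcommon is an internal package, so this shape is
// only usable from inside golang.org/x/net; names such as hpackEnc,
// requestedGzip and peerMaxHeaderListSize below are placeholders, not
// identifiers from this patch.
//
//	res, err := httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{
//		Request: httpcommon.Request{
//			Header:              req.Header,
//			Trailer:             req.Trailer,
//			URL:                 req.URL,
//			Host:                req.Host,
//			Method:              req.Method,
//			ActualContentLength: actualContentLength(req), // 0 means zero, -1 means unknown
//		},
//		AddGzipHeader:         requestedGzip,
//		PeerMaxHeaderListSize: peerMaxHeaderListSize,
//		DefaultUserAgent:      "Go-http-client/2.0",
//	}, func(name, value string) {
//		hpackEnc.WriteField(hpack.HeaderField{Name: name, Value: value})
//	})
//
// res.HasBody and res.HasTrailers then decide whether END_STREAM is set on
// the HEADERS frame, exactly as encodeAndWriteHeaders does in the hunk above.
// --------------------------------------------------------------------------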
+func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) + } + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) + } + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("invalid Connection request header: %q", vv) + } + return nil +} + +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { + k = CanonicalHeader(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", fmt.Errorf("invalid Trailer key %q", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +func validateHeaders(hdrs map[string][]string) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + +// shouldSendReqContentLength reports whether we should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. 
+ // It might be a bit odd to return errors this way rather than returing an error, + // but this ensures we don't forget to include a CountError reason. + InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." + // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index d7d4b8b6e356..32bdf435ecdc 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -7,6 +7,7 @@ package proxy import ( "context" "net" + "net/netip" "strings" ) @@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net. } func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { + if nip, err := netip.ParseAddr(host); err == nil { + ip := net.IP(nip.AsSlice()) for _, net := range p.bypassNetworks { if net.Contains(ip) { return p.bypass @@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) { } continue } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) + if nip, err := netip.ParseAddr(host); err == nil { + p.AddIP(net.IP(nip.AsSlice())) continue } if strings.HasPrefix(host, "*.") { diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index ac76165cebb0..3448d20395ce 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -6,9 +6,10 @@ // as specified in RFC 6455. 
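// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored patch). The per_host.go
// hunk above only swaps net.ParseIP for netip.ParseAddr when classifying
// hosts; the exported x/net/proxy behaviour is unchanged. A minimal sketch,
// assuming a local SOCKS5 proxy at 127.0.0.1:1080 and made-up bypass entries:
package main

import (
	"context"
	"fmt"

	"golang.org/x/net/proxy"
)

func main() {
	// Route everything through the SOCKS5 proxy except the bypass list below.
	socks, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		fmt.Println("socks5:", err)
		return
	}
	p := proxy.NewPerHost(socks, proxy.Direct)
	// AddFromString accepts a comma-separated mix of IPs, CIDRs, zones and hosts.
	p.AddFromString("10.0.0.0/8,192.168.1.1,*.internal.example,localhost")

	// "localhost" matches the bypass list, so this dials directly.
	conn, err := p.DialContext(context.Background(), "tcp", "localhost:8080")
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	conn.Close()
}
// --------------------------------------------------------------------------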
// // This package currently lacks some features found in an alternative -// and more actively maintained WebSocket package: +// and more actively maintained WebSocket packages: // -// https://pkg.go.dev/github.com/coder/websocket +// - [github.com/gorilla/websocket] +// - [github.com/coder/websocket] package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee63d4f..a4ea5d14f158 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b638c..000000000000 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce33434e23..000000000000 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 02609d5b21d5..9c105f23afcd 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -72,6 +72,9 @@ var X86 struct { HasSSSE3 bool // Supplemental streaming SIMD extension 3 HasSSE41 bool // Streaming SIMD extension 4 and 4.1 HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add + HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions + HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 600a6807861e..1e642f3304fa 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -53,6 +53,9 @@ func initOptions() { {Name: "sse41", Feature: &X86.HasSSE41}, {Name: "sse42", Feature: &X86.HasSSE42}, {Name: "ssse3", Feature: &X86.HasSSSE3}, + {Name: "avxifma", Feature: &X86.HasAVXIFMA}, + {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, + {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, // These capabilities should always be enabled on amd64: {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, @@ -106,7 +109,7 @@ func archInit() { return } - _, ebx7, ecx7, edx7 := cpuid(7, 0) + eax7, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(3, ebx7) X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasBMI2 = isSet(8, ebx7) @@ -134,14 +137,24 @@ func archInit() { X86.HasAVX512VAES = isSet(9, ecx7) X86.HasAVX512VBMI2 = isSet(6, ecx7) X86.HasAVX512BITALG = isSet(12, ecx7) - - eax71, _, _, _ := cpuid(7, 1) - X86.HasAVX512BF16 = isSet(5, eax71) } X86.HasAMXTile = isSet(24, edx7) X86.HasAMXInt8 = isSet(25, edx7) X86.HasAMXBF16 = isSet(22, edx7) + + // These features depend on the second level of extended features. + if eax7 >= 1 { + eax71, _, _, edx71 := cpuid(7, 1) + if X86.HasAVX512 { + X86.HasAVX512BF16 = isSet(5, eax71) + } + if X86.HasAVX { + X86.HasAVXIFMA = isSet(23, eax71) + X86.HasAVXVNNI = isSet(4, eax71) + X86.HasAVXVNNIInt8 = isSet(4, edx71) + } + } } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 000000000000..37a82528f580 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. 
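// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored patch). With the go1.20
// shims above deleted, errgroup.WithContext now calls context.WithCancelCause
// directly, and the SetLimit docs note that a limit of zero blocks every new
// goroutine. A minimal sketch of the unchanged public API:
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(2) // at most two workers in flight; SetLimit(0) would block every Go call

	for i := 0; i < 5; i++ {
		i := i
		g.Go(func() error {
			select {
			case <-ctx.Done():
				// The first failing worker cancels ctx with its error as the cause.
				return context.Cause(ctx)
			default:
				fmt.Println("worker", i)
				return nil
			}
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("first error:", err)
	}
}
// --------------------------------------------------------------------------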
+func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 000000000000..1200487f2e86 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f2c90..be8c0020701e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af064dd..abc3955477c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. 
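// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored patch). unix.Auxv,
// added above, returns the ELF auxiliary vector as key/value pairs (go1.21+;
// it returns an error where the vector is unavailable). The AT_PAGESZ key (6)
// comes from <elf.h> and is defined locally here as an assumption rather than
// relying on any exported constant:
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

const atPagesz = 6 // AT_PAGESZ from <elf.h>

func main() {
	auxv, err := unix.Auxv()
	if err != nil {
		// Non-ELF platform, locked-down environment, or pre-go1.21 build.
		fmt.Println("auxv unavailable:", err)
		return
	}
	for _, kv := range auxv {
		if kv[0] == atPagesz {
			fmt.Println("page size:", kv[1])
		}
	}
}
// --------------------------------------------------------------------------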
+func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 6ebc48b3fecd..4f432bfe8fee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1245,6 +1245,7 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1330,8 +1331,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1551,6 +1554,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1623,6 +1627,8 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1867,6 +1873,7 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1967,6 +1974,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2083,6 +2091,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2163,6 +2172,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2491,6 +2501,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2499,6 +2510,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + 
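// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored patch). The Solaris
// helpers above wrap ucred_get(3C) and getpeerucred(3C); a finalizer frees
// the underlying ucred_t. Solaris-only, and the peer descriptor below is an
// assumption for illustration:
//go:build solaris

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Credentials of the current process.
	self, err := unix.UcredGet(os.Getpid())
	if err != nil {
		fmt.Println("ucred_get:", err)
		return
	}
	fmt.Println("euid:", self.Geteuid(), "egid:", self.Getegid(), "pid:", self.Getpid())

	// Credentials of the peer on an already-connected Unix-domain socket.
	const peerFD = 3 // hypothetical descriptor inherited from a listener
	if peer, err := unix.GetPeerUcred(uintptr(peerFD)); err == nil {
		fmt.Println("peer euid:", peer.Geteuid(), "peer pid:", peer.Getpid())
	}
}
// --------------------------------------------------------------------------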
PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2525,6 +2537,8 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2592,6 +2606,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2602,6 +2617,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2911,7 +2929,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2920,6 +2937,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index c0d45e320505..75207613c785 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -304,6 +306,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c731d24f0252..c68acda53522 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,6 +307,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 680018a4a7c9..a8c607ab86b5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -310,6 +312,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index a63909f308d6..18563dd8d33a 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,6 +109,7 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 @@ -119,6 +120,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -302,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9b0a2573fe3f..22912cdaa944 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -297,6 +299,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 958e6e0645ac..29344eb37ab5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 50c7f25bd16c..20d51fb96a89 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ced21d66d955..321b60902ae5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS 
= 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 226c04419023..9bacdf1e2791 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 3122737cd464..c2242726156a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -358,6 +360,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eb5d3467edf0..6270c8ee13e3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e921ebc60b71..9966c1941f83 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 38ba81c55c1f..848e5fcc42e6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -294,6 +296,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 
SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 71f0400977b3..669b2adb80b7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -366,6 +368,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c44a313322c5..4834e57514e4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -357,6 +359,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87feb8da..c6545413c45b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free 
//go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 524b0820cbc2..c79aaff306ae 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go 
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,4 +458,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f485dbf45656..5eb450695e95 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,4 +381,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 70b35bf3b09f..05e502974458 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,4 +422,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1893e2fe8840..38c53ec51bb3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,4 +325,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 16a4017da0ab..31d2e71a18e1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,4 +321,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 7e567f1efff2..f4184a336b0e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 38ae55e5ef85..05b9962278f2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 55e92e60a82a..43a256e9e675 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT 
= 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 60658d6a021f..eea5ddfc2207 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index e203e8a7ed4b..0d777bfbb140 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,4 +449,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5944b97d5460..b44636502561 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index c66d416dad1c..0c7d21c18816 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a5459e766f59..840539169878 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,4 +326,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 01d86825bb92..fcf1b790d6cf 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,4 +387,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 7b703e77cda8..52d15b5f9d45 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,4 +400,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 5537148dcbb3..a46abe647205 100644 --- 
a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4747,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14c + NL80211_ATTR_MAX = 0x14d NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5519,7 +5519,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_MAX = 0x7 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -6174,3 +6174,5 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } + +const RTM_NEWNVLAN = 0x70 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 4e613cf6335c..3ca814f54d44 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/time/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/time/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index b0b982e9c6e6..f0e0cf3cb1db 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -80,6 +80,19 @@ func (lim *Limiter) Burst() int { return lim.burst } +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. 
+func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +102,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. // Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -125,17 +138,17 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. -func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +163,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. -func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +171,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +183,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -196,18 +209,20 @@ func (lim *Limiter) Reserve() *Reservation { // The Limiter takes this Reservation into account when allowing future events. // The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. // Usage example: -// r := lim.ReserveN(time.Now(), 1) -// if !r.OK() { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// return -// } -// time.Sleep(r.Delay()) -// Act() +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! 
Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -221,6 +236,18 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. +func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -236,25 +263,25 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { default: } // Determine wait limit - now := time.Now() waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } - t := time.NewTimer(delay) - defer t.Stop() + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing select { - case <-t.C: + case <-ch: // We can proceed. return nil case <-ctx.Done(): @@ -273,13 +300,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -290,13 +317,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. -func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -304,7 +331,7 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. 
-func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() defer lim.mu.Unlock() @@ -313,7 +340,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, } } else if lim.limit == 0 { var ok bool @@ -325,11 +352,11 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: ok, lim: lim, tokens: lim.burst, - timeToAct: now, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -351,16 +378,12 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) - } + r.timeToAct = t.Add(waitDuration) - // Update state - if ok { - lim.last = now + // Update state + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct - } else { - lim.last = last } return r @@ -369,20 +392,20 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 000000000000..6ba99ddb67b1 --- /dev/null +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. +// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. + + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. 
+// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. +func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/vendor/k8s.io/klog/v2/format.go b/vendor/k8s.io/klog/v2/format.go new file mode 100644 index 000000000000..63995ca6dbf4 --- /dev/null +++ b/vendor/k8s.io/klog/v2/format.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-logr/logr" +) + +// Format wraps a value of an arbitrary type and implement fmt.Stringer and +// logr.Marshaler for them. Stringer returns pretty-printed JSON. MarshalLog +// returns the original value with a type that has no special methods, in +// particular no MarshalLog or MarshalJSON. +// +// Wrapping values like that is useful when the value has a broken +// implementation of these special functions (for example, a type which +// inherits String from TypeMeta, but then doesn't re-implement String) or the +// implementation produces output that is less readable or unstructured (for +// example, the generated String functions for Kubernetes API types). +func Format(obj interface{}) interface{} { + return formatAny{Object: obj} +} + +type formatAny struct { + Object interface{} +} + +func (f formatAny) String() string { + var buffer strings.Builder + encoder := json.NewEncoder(&buffer) + encoder.SetIndent("", " ") + if err := encoder.Encode(&f.Object); err != nil { + return fmt.Sprintf("error marshaling %T to JSON: %v", f, err) + } + return buffer.String() +} + +func (f formatAny) MarshalLog() interface{} { + // Returning a pointer to a pointer ensures that zapr doesn't find a + // fmt.Stringer or logr.Marshaler when it checks the type of the + // value. It then falls back to reflection, which dumps the value being + // pointed to (JSON doesn't have pointers). 
+ ptr := &f.Object + return &ptr +} + +var _ fmt.Stringer = formatAny{} +var _ logr.Marshaler = formatAny{} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index 1dc81a15fa62..bcdf5f8ee12d 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -18,6 +18,7 @@ package serialize import ( "bytes" + "encoding/json" "fmt" "strconv" @@ -196,11 +197,11 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { case textWriter: writeTextWriterValue(b, v) case fmt.Stringer: - writeStringValue(b, true, StringerToString(v)) + writeStringValue(b, StringerToString(v)) case string: - writeStringValue(b, true, v) + writeStringValue(b, v) case error: - writeStringValue(b, true, ErrorToString(v)) + writeStringValue(b, ErrorToString(v)) case logr.Marshaler: value := MarshalerToValue(v) // A marshaler that returns a string is useful for @@ -215,9 +216,9 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { // value directly. switch value := value.(type) { case string: - writeStringValue(b, true, value) + writeStringValue(b, value) default: - writeStringValue(b, false, f.AnyToString(value)) + f.formatAny(b, value) } case []byte: // In https://github.com/kubernetes/klog/pull/237 it was decided @@ -234,7 +235,7 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { b.WriteByte('=') b.WriteString(fmt.Sprintf("%+q", v)) default: - writeStringValue(b, false, f.AnyToString(v)) + f.formatAny(b, v) } } @@ -242,12 +243,25 @@ func KVFormat(b *bytes.Buffer, k, v interface{}) { Formatter{}.KVFormat(b, k, v) } -// AnyToString is the historic fallback formatter. -func (f Formatter) AnyToString(v interface{}) string { +// formatAny is the fallback formatter for a value. It supports a hook (for +// example, for YAML encoding) and itself uses JSON encoding. +func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { + b.WriteRune('=') if f.AnyToStringHook != nil { - return f.AnyToStringHook(v) + b.WriteString(f.AnyToStringHook(v)) + return + } + encoder := json.NewEncoder(b) + l := b.Len() + if err := encoder.Encode(v); err != nil { + // This shouldn't happen. We discard whatever the encoder + // wrote and instead dump an error string. + b.Truncate(l) + b.WriteString(fmt.Sprintf(`""`, err)) + return } - return fmt.Sprintf("%+v", v) + // Remove trailing newline. + b.Truncate(b.Len() - 1) } // StringerToString converts a Stringer to a string, @@ -287,7 +301,7 @@ func ErrorToString(err error) (ret string) { } func writeTextWriterValue(b *bytes.Buffer, v textWriter) { - b.WriteRune('=') + b.WriteByte('=') defer func() { if err := recover(); err != nil { fmt.Fprintf(b, `""`, err) @@ -296,18 +310,13 @@ func writeTextWriterValue(b *bytes.Buffer, v textWriter) { v.WriteText(b) } -func writeStringValue(b *bytes.Buffer, quote bool, v string) { +func writeStringValue(b *bytes.Buffer, v string) { data := []byte(v) index := bytes.IndexByte(data, '\n') if index == -1 { b.WriteByte('=') - if quote { - // Simple string, quote quotation marks and non-printable characters. - b.WriteString(strconv.Quote(v)) - return - } - // Non-string with no line breaks. - b.WriteString(v) + // Simple string, quote quotation marks and non-printable characters. 
+ b.WriteString(strconv.Quote(v)) return } diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go index ecd3f8b69033..786af74bfd38 100644 --- a/vendor/k8s.io/klog/v2/k8s_references.go +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -178,14 +178,14 @@ func (ks kobjSlice) process() (objs []interface{}, err string) { return objectRefs, "" } -var nilToken = []byte("") +var nilToken = []byte("null") func (ks kobjSlice) WriteText(out *bytes.Buffer) { s := reflect.ValueOf(ks.arg) switch s.Kind() { case reflect.Invalid: - // nil parameter, print as empty slice. - out.WriteString("[]") + // nil parameter, print as null. + out.Write(nilToken) return case reflect.Slice: // Okay, handle below. @@ -197,15 +197,15 @@ func (ks kobjSlice) WriteText(out *bytes.Buffer) { defer out.Write([]byte{']'}) for i := 0; i < s.Len(); i++ { if i > 0 { - out.Write([]byte{' '}) + out.Write([]byte{','}) } item := s.Index(i).Interface() if item == nil { out.Write(nilToken) } else if v, ok := item.(KMetadata); ok { - KObj(v).writeUnquoted(out) + KObj(v).WriteText(out) } else { - fmt.Fprintf(out, "", item) + fmt.Fprintf(out, `""`, item) return } } diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 466eeaf265b5..152f8a6bd6d9 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -1228,6 +1228,19 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the klog logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, ok := severity.ByName(name) + if !ok { + panic(fmt.Sprintf("klog.NewStandardLogger(%q): unknown severity", name)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. 
type logBridge severity.Severity diff --git a/vendor/modules.txt b/vendor/modules.txt index 8e6dd4082be1..075517e85c94 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -239,7 +239,7 @@ github.com/docker/go-metrics # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units -# github.com/emicklei/go-restful/v3 v3.10.1 +# github.com/emicklei/go-restful/v3 v3.10.2 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log @@ -324,10 +324,10 @@ github.com/json-iterator/go # github.com/klauspost/compress v1.16.7 ## explicit; go 1.18 github.com/klauspost/compress +github.com/klauspost/compress/flate github.com/klauspost/compress/fse -github.com/klauspost/compress/huff0 github.com/klauspost/compress/gzip -github.com/klauspost/compress/flate +github.com/klauspost/compress/huff0 github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd @@ -400,6 +400,9 @@ github.com/opencontainers/runtime-tools/validate/capabilities github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalkdir +# github.com/openebs/lvm-localpv v1.7.0 +## explicit; go 1.19 +github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1 # github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml @@ -415,10 +418,10 @@ github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.3.0 -## explicit; go 1.9 +# github.com/prometheus/client_model v0.4.0 +## explicit; go 1.18 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.42.0 +# github.com/prometheus/common v0.44.0 ## explicit; go 1.18 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -535,8 +538,8 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# golang.org/x/crypto v0.31.0 -## explicit; go 1.20 +# golang.org/x/crypto v0.36.0 +## explicit; go 1.23.0 golang.org/x/crypto/cast5 golang.org/x/crypto/openpgp golang.org/x/crypto/openpgp/armor @@ -548,13 +551,14 @@ golang.org/x/crypto/pbkdf2 # golang.org/x/mod v0.17.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.33.0 -## explicit; go 1.18 +# golang.org/x/net v0.38.0 +## explicit; go 1.23.0 golang.org/x/net/context golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna +golang.org/x/net/internal/httpcommon golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy @@ -564,12 +568,12 @@ golang.org/x/net/websocket ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.10.0 -## explicit; go 1.18 +# golang.org/x/sync v0.12.0 +## explicit; go 1.23.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.28.0 -## explicit; go 1.18 +# golang.org/x/sys v0.31.0 +## explicit; go 1.23.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix @@ -578,16 +582,16 @@ golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc golang.org/x/sys/windows/svc/debug golang.org/x/sys/windows/svc/mgr -# golang.org/x/term v0.27.0 -## explicit; go 1.18 +# 
golang.org/x/term v0.30.0 +## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.21.0 -## explicit; go 1.18 +# golang.org/x/text v0.23.0 +## explicit; go 1.23.0 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 +# golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate # google.golang.org/appengine v1.6.7 @@ -717,7 +721,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.26.2 +# k8s.io/api v0.27.2 => k8s.io/api v0.26.2 ## explicit; go 1.19 k8s.io/api/authentication/v1 k8s.io/api/core/v1 @@ -779,7 +783,7 @@ k8s.io/apiserver/pkg/endpoints/responsewriter k8s.io/apiserver/pkg/features k8s.io/apiserver/pkg/server/httplog k8s.io/apiserver/pkg/util/feature -# k8s.io/client-go v0.26.2 +# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => k8s.io/client-go v0.26.2 ## explicit; go 1.19 k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/install @@ -812,7 +816,7 @@ k8s.io/component-base/version # k8s.io/cri-api v0.27.1 ## explicit; go 1.20 k8s.io/cri-api/pkg/apis/runtime/v1 -# k8s.io/klog/v2 v2.90.1 +# k8s.io/klog/v2 v2.100.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -820,7 +824,7 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 +# k8s.io/utils v0.0.0-20230505201702-9f6742963106 ## explicit; go 1.18 k8s.io/utils/clock k8s.io/utils/clock/testing @@ -848,3 +852,6 @@ tags.cncf.io/container-device-interface/pkg/parser # tags.cncf.io/container-device-interface/specs-go v0.8.0 ## explicit; go 1.19 tags.cncf.io/container-device-interface/specs-go +# k8s.io/apimachinery v0.27.2 => k8s.io/apimachinery v0.24.17 +# k8s.io/client-go => k8s.io/client-go v0.26.2 +# k8s.io/api => k8s.io/api v0.26.2 From d79dd183b1209b1c95a120de47f4cb0ba3dbd099 Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Tue, 15 Jul 2025 07:57:19 +0000 Subject: [PATCH 03/19] fix bkt; fix annotation "/" --- container_opts.go | 11 +++++++++-- snapshots/devbox/constants.go | 10 ---------- snapshots/devbox/devbox.go | 24 ++++++------------------ snapshots/devbox/storage/bolt.go | 13 ++++++++----- 4 files changed, 23 insertions(+), 35 deletions(-) delete mode 100644 snapshots/devbox/constants.go diff --git a/container_opts.go b/container_opts.go index 6d2cadaf502a..7afd46281bb8 100644 --- a/container_opts.go +++ b/container_opts.go @@ -236,14 +236,21 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts } } start_opts := []snapshots.Opt{} - // if sealos.io/devbox/use-limit is set, move it to containerd.io/snapshot/new-layer-limit - if limit, ok := base.Labels["sealos.io/devbox/use-limit"]; ok { + // if sealos.io/devbox/use-limit is set, move it to containerd.io/snapshot-new-layer-limit + if limit, ok := base.Labels["sealos.io/devbox-use-limit"]; ok { if limit != "" { start_opts = append(start_opts, snapshots.WithLabels(map[string]string{ "containerd.io/snapshot/new-layer-limit": limit, })) + fmt.Printf("Using devbox layer limit: %s\n", limit) } } + if contentId, ok := base.Labels["containerd.io/snapshot-devbox-content-id"]; ok && contentId != "" { + start_opts = append(start_opts, snapshots.WithLabels(map[string]string{ + "containerd.io/snapshot/devbox-content-id": contentId, + })) + fmt.Printf("Using devbox content ID: 
%s\n", contentId) + } start_opts = append(start_opts, opts...) if _, err := s.Prepare(ctx, id, parent, start_opts...); err != nil { return err diff --git a/snapshots/devbox/constants.go b/snapshots/devbox/constants.go deleted file mode 100644 index fe983cfcde79..000000000000 --- a/snapshots/devbox/constants.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build linux - -package devbox - -const ( - // 改插件默认的存储路径 - DefaultRootDir = "/var/lib/containerd/io.sealos.labring.devbox" - // 该插件提供 grpc 服务的 socks 文件名,路径为 paths.Join(rootDir, SocksFileName) - SocksFileName = "grpc.socks" -) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index c36e860fae59..6bb8c8120c29 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -345,15 +345,7 @@ func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { } } for _, lvName := range removedLvNames { - vol := &apis.LVMVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: lvName, - }, - Spec: apis.VolumeInfo{ - VolGroup: o.lvmVgName, - }, - } - err := lvm.DestroyVolume(vol) + err := o.removeLv(lvName) if err != nil { log.G(ctx).WithError(err).WithField("lvName", lvName).Warn("failed to destroy LVM logical volume") continue @@ -434,15 +426,7 @@ func (o *snapshotter) Cleanup(ctx context.Context) error { } for _, lvName := range cleanupLv { - vol := &apis.LVMVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: lvName, - }, - Spec: apis.VolumeInfo{ - VolGroup: o.lvmVgName, - }, - } - err := lvm.DestroyVolume(vol) + err := o.removeLv(lvName) if err != nil { log.G(ctx).WithError(err).WithField("lvName", lvName).Warn("failed to destroy LVM logical volume") continue @@ -642,6 +626,10 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k } } + for label, value := range base.Labels { + fmt.Printf("Snapshot label: %s=%s\n", label, value) + } + contentId, idOk := base.Labels[devboxContentKey] useLimit, limitOk := base.Labels[newLayerLimitKey] removeContentId, removeIdOk := base.Labels[removeDevboxContentKey] diff --git a/snapshots/devbox/storage/bolt.go b/snapshots/devbox/storage/bolt.go index a22581cac205..8d6ccd379d6b 100644 --- a/snapshots/devbox/storage/bolt.go +++ b/snapshots/devbox/storage/bolt.go @@ -670,21 +670,21 @@ func withDevboxBucket(ctx context.Context, fn func(context.Context, *bolt.Bucket return fmt.Errorf("bucket does not exist: %w", errdefs.ErrNotFound) } - bkt := vbkt.Bucket(DevboxStoragePathBucket) + bkt := vbkt.Bucket(bucketKeySnapshot) if bkt == nil { // Create the devbox storage path bucket if it does not exist var err error - bkt, err = vbkt.CreateBucketIfNotExists(DevboxStoragePathBucket) + bkt, err = vbkt.CreateBucketIfNotExists(bucketKeySnapshot) if err != nil { return fmt.Errorf("failed to create devbox storage path bucket: %w", err) } } - dbkt := bkt.Bucket(DevboxStoragePathBucket) + dbkt := vbkt.Bucket(DevboxStoragePathBucket) if dbkt == nil { // Create the devbox storage path bucket if it does not exist var err error - dbkt, err = bkt.CreateBucketIfNotExists(DevboxStoragePathBucket) + dbkt, err = vbkt.CreateBucketIfNotExists(DevboxStoragePathBucket) if err != nil { return fmt.Errorf("failed to create devbox storage path bucket: %w", err) } @@ -739,7 +739,10 @@ func SetDevboxContent(ctx context.Context, key, contentKey, lvName, path string) return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) } fmt.Printf("devbox storage path bucket: %s\n", key) - bkt.Put(DevboxKeyContentID, []byte(contentKey)) + err := bkt.Put(DevboxKeyContentID, 
[]byte(contentKey)) + if err != nil { + return fmt.Errorf("failed to set content ID for key %s: %w", key, err) + } if dbkt == nil { return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) } From e0445fe544200849d475456c7eef83d4909b9e49 Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Thu, 17 Jul 2025 07:57:58 +0000 Subject: [PATCH 04/19] copp last layer to newsnapshot --- container_opts.go | 16 +- go.mod | 2 + go.sum | 4 + snapshots/devbox/devbox.go | 71 +++- snapshots/devbox/plugin/plugin.go | 2 +- snapshots/devbox/storage/bolt.go | 99 +++-- vendor/github.com/otiai10/copy/.gitignore | 9 + vendor/github.com/otiai10/copy/LICENSE | 21 ++ vendor/github.com/otiai10/copy/README.md | 127 +++++++ vendor/github.com/otiai10/copy/copy.go | 352 ++++++++++++++++++ .../otiai10/copy/copy_namedpipes.go | 17 + .../otiai10/copy/copy_namedpipes_x.go | 14 + vendor/github.com/otiai10/copy/options.go | 174 +++++++++ .../otiai10/copy/permission_control.go | 49 +++ .../otiai10/copy/preserve_ltimes.go | 19 + .../otiai10/copy/preserve_ltimes_x.go | 7 + .../github.com/otiai10/copy/preserve_owner.go | 23 ++ .../otiai10/copy/preserve_owner_x.go | 9 + .../github.com/otiai10/copy/preserve_times.go | 11 + vendor/github.com/otiai10/copy/stat_times.go | 21 ++ .../otiai10/copy/stat_times_darwin.go | 19 + .../otiai10/copy/stat_times_freebsd.go | 19 + .../github.com/otiai10/copy/stat_times_js.go | 19 + .../otiai10/copy/stat_times_windows.go | 18 + .../github.com/otiai10/copy/stat_times_x.go | 17 + .../github.com/otiai10/copy/symlink_test_x.go | 45 +++ vendor/github.com/otiai10/mint/.gitignore | 2 + vendor/github.com/otiai10/mint/LICENSE | 7 + vendor/github.com/otiai10/mint/README.md | 62 +++ vendor/github.com/otiai10/mint/because.go | 15 + vendor/github.com/otiai10/mint/comparer.go | 53 +++ vendor/github.com/otiai10/mint/exit.go | 41 ++ .../github.com/otiai10/mint/exit_freebsd.go | 10 + vendor/github.com/otiai10/mint/log.go | 15 + vendor/github.com/otiai10/mint/mint.go | 86 +++++ vendor/github.com/otiai10/mint/mocks.go | 30 ++ .../github.com/otiai10/mint/mquery/README.md | 31 ++ .../github.com/otiai10/mint/mquery/mquery.go | 72 ++++ vendor/github.com/otiai10/mint/result.go | 23 ++ vendor/github.com/otiai10/mint/testee.go | 145 ++++++++ vendor/modules.txt | 7 + 41 files changed, 1726 insertions(+), 57 deletions(-) create mode 100644 vendor/github.com/otiai10/copy/.gitignore create mode 100644 vendor/github.com/otiai10/copy/LICENSE create mode 100644 vendor/github.com/otiai10/copy/README.md create mode 100644 vendor/github.com/otiai10/copy/copy.go create mode 100644 vendor/github.com/otiai10/copy/copy_namedpipes.go create mode 100644 vendor/github.com/otiai10/copy/copy_namedpipes_x.go create mode 100644 vendor/github.com/otiai10/copy/options.go create mode 100644 vendor/github.com/otiai10/copy/permission_control.go create mode 100644 vendor/github.com/otiai10/copy/preserve_ltimes.go create mode 100644 vendor/github.com/otiai10/copy/preserve_ltimes_x.go create mode 100644 vendor/github.com/otiai10/copy/preserve_owner.go create mode 100644 vendor/github.com/otiai10/copy/preserve_owner_x.go create mode 100644 vendor/github.com/otiai10/copy/preserve_times.go create mode 100644 vendor/github.com/otiai10/copy/stat_times.go create mode 100644 vendor/github.com/otiai10/copy/stat_times_darwin.go create mode 100644 vendor/github.com/otiai10/copy/stat_times_freebsd.go create mode 100644 vendor/github.com/otiai10/copy/stat_times_js.go create mode 100644 vendor/github.com/otiai10/copy/stat_times_windows.go 
create mode 100644 vendor/github.com/otiai10/copy/stat_times_x.go create mode 100644 vendor/github.com/otiai10/copy/symlink_test_x.go create mode 100644 vendor/github.com/otiai10/mint/.gitignore create mode 100644 vendor/github.com/otiai10/mint/LICENSE create mode 100644 vendor/github.com/otiai10/mint/README.md create mode 100644 vendor/github.com/otiai10/mint/because.go create mode 100644 vendor/github.com/otiai10/mint/comparer.go create mode 100644 vendor/github.com/otiai10/mint/exit.go create mode 100644 vendor/github.com/otiai10/mint/exit_freebsd.go create mode 100644 vendor/github.com/otiai10/mint/log.go create mode 100644 vendor/github.com/otiai10/mint/mint.go create mode 100644 vendor/github.com/otiai10/mint/mocks.go create mode 100644 vendor/github.com/otiai10/mint/mquery/README.md create mode 100644 vendor/github.com/otiai10/mint/mquery/mquery.go create mode 100644 vendor/github.com/otiai10/mint/result.go create mode 100644 vendor/github.com/otiai10/mint/testee.go diff --git a/container_opts.go b/container_opts.go index 7afd46281bb8..7c7a82c43265 100644 --- a/container_opts.go +++ b/container_opts.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + "strings" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" @@ -236,21 +237,14 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts } } start_opts := []snapshots.Opt{} - // if sealos.io/devbox/use-limit is set, move it to containerd.io/snapshot-new-layer-limit - if limit, ok := base.Labels["sealos.io/devbox-use-limit"]; ok { - if limit != "" { + for label, value := range base.Labels { + // if label start with "devbox.sealos.io/", transform it to "containerd.io/snapshot/" + if strings.HasPrefix(label, "devbox.sealos.io/") { start_opts = append(start_opts, snapshots.WithLabels(map[string]string{ - "containerd.io/snapshot/new-layer-limit": limit, + "containerd.io/snapshot/devbox-" + label[len("devbox.sealos.io/"):]: value, })) - fmt.Printf("Using devbox layer limit: %s\n", limit) } } - if contentId, ok := base.Labels["containerd.io/snapshot-devbox-content-id"]; ok && contentId != "" { - start_opts = append(start_opts, snapshots.WithLabels(map[string]string{ - "containerd.io/snapshot/devbox-content-id": contentId, - })) - fmt.Printf("Using devbox content ID: %s\n", contentId) - } start_opts = append(start_opts, opts...) 
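// Editor's note (illustrative, not part of the upstream diff): the loop above rewrites any
// container annotation under the "devbox.sealos.io/" prefix into a snapshot label under
// "containerd.io/snapshot/devbox-". For example, assuming a workload sets
//   devbox.sealos.io/storage-limit = "10Gi"
// the snapshotter would receive
//   containerd.io/snapshot/devbox-storage-limit = "10Gi"
// which matches newLayerLimitKey in snapshots/devbox/devbox.go later in this patch; likewise
// devbox.sealos.io/content-id maps to devboxContentIDKey. The annotation values shown here are
// hypothetical and used only for illustration.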
if _, err := s.Prepare(ctx, id, parent, start_opts...); err != nil { return err diff --git a/go.mod b/go.mod index 41678aba1c27..68e5b560a5e9 100644 --- a/go.mod +++ b/go.mod @@ -57,6 +57,7 @@ require ( github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 github.com/opencontainers/selinux v1.11.0 github.com/openebs/lvm-localpv v1.7.0 + github.com/otiai10/copy v1.14.1 github.com/pelletier/go-toml v1.9.5 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.16.0 @@ -121,6 +122,7 @@ require ( github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/otiai10/mint v1.6.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect diff --git a/go.sum b/go.sum index b83db85557fa..b864207b4e88 100644 --- a/go.sum +++ b/go.sum @@ -630,6 +630,10 @@ github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M5 github.com/openebs/lvm-localpv v1.7.0 h1:fjqzMuAYJcV8gjzLPteXr3oiiVXuZvuI67fz20Ubn4k= github.com/openebs/lvm-localpv v1.7.0/go.mod h1:/kaYdEZ/5wyRWwTJdrVzQu/u9iGuZ2U7msIl++XBp5o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= +github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= +github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= +github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index 6bb8c8120c29..298126324bd3 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -28,6 +28,8 @@ import ( "strings" "syscall" + cp "github.com/otiai10/copy" + "github.com/containerd/containerd/mount" "github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots/overlay/overlayutils" @@ -48,9 +50,10 @@ import ( // the change set between this snapshot and its parent is stored. 
const upperdirKey = "containerd.io/snapshot/overlay.upperdir" -const newLayerLimitKey = "containerd.io/snapshot/new-layer-limit" -const devboxContentKey = "containerd.io/snapshot/devbox-content-id" -const removeDevboxContentKey = "containerd.io/snapshot/devbox-remove-content-id" +const newLayerLimitKey = "containerd.io/snapshot/devbox-storage-limit" +const devboxContentIDKey = "containerd.io/snapshot/devbox-content-id" +const privateImageKey = "containerd.io/snapshot/devbox-init" +const removeContentIDKey = "containerd.io/snapshot/devbox-remove-content-id" // SnapshotterConfig is used to configure the overlay snapshotter instance type SnapshotterConfig struct { @@ -221,6 +224,11 @@ func (o *snapshotter) Stat(ctx context.Context, key string) (info snapshots.Info func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (newInfo snapshots.Info, err error) { err = o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { + + if value, ok := info.Labels[removeContentIDKey]; ok { + storage.RemoveDevboxContent(ctx, value) + } + newInfo, err = storage.UpdateInfo(ctx, info, fieldpaths...) if err != nil { return err @@ -363,7 +371,7 @@ func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { return fmt.Errorf("failed to remove devbox content for snapshot %s: %w", key, err) } if mountPath != "" { - if err := o.unmountLvm(ctx, mountPath); err != nil { + if err = o.unmountLvm(ctx, mountPath); err != nil { log.G(ctx).WithError(err).WithField("path", mountPath).Warn("failed to unmount directory") } } @@ -621,7 +629,7 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k base := snapshots.Info{} for _, opt := range opts { - if err := opt(&base); err != nil { + if err = opt(&base); err != nil { return nil, fmt.Errorf("failed to apply snapshot option: %w", err) } } @@ -630,20 +638,24 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k fmt.Printf("Snapshot label: %s=%s\n", label, value) } - contentId, idOk := base.Labels[devboxContentKey] + contentId, idOk := base.Labels[devboxContentIDKey] useLimit, limitOk := base.Labels[newLayerLimitKey] - removeContentId, removeIdOk := base.Labels[removeDevboxContentKey] - - if err := o.ms.WithTransaction(ctx, true, func(ctx context.Context) (err error) { - if removeIdOk { - storage.SetDevboxContentStatusRemove(ctx, removeContentId) - } + _, privateImageOk := base.Labels[privateImageKey] + if err = o.ms.WithTransaction(ctx, true, func(ctx context.Context) (err error) { snapshotDir := filepath.Join(o.root, "snapshots") - s, err = storage.CreateSnapshot(ctx, kind, key, parent, opts...) + directParent := parent + if privateImageOk { + directParent, err = storage.GetParentID(ctx, parent) + if err != nil { + return fmt.Errorf("failed to get parent ID for private image: %w", err) + } + } + + s, err = storage.CreateSnapshot(ctx, kind, key, directParent, opts...) 
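// Editor's note (a reading of this hunk, not authoritative): when the
// "containerd.io/snapshot/devbox-init" label (privateImageKey) is set, the snapshot is created
// against the parent's own parent (directParent) instead of the parent itself; the parent's
// upperdir is then copied into the new snapshot's upperdir further down in this hunk via cp.Copy,
// which is what folds the last image layer into the new writable layer. All identifiers referenced
// here appear elsewhere in this patch; nothing outside it is assumed.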
if err != nil { - return fmt.Errorf("failed to mount LVM logical volume %s: %w", lvName, err) + return fmt.Errorf("failed to create snapshot: %w", err) } fmt.Println("Created snapshot:", s.ID) @@ -657,7 +669,8 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if notExistErr == nil && lvName != "" { // mount point for the snapshot fmt.Println("LVM logical volume name found for content ID:", contentId, "is", lvName) - if isMounted, err := isMountPoint(npath); err != nil { + var isMounted bool + if isMounted, err = isMountPoint(npath); err != nil { return fmt.Errorf("failed to check if path is a mount point: %w", err) } else if isMounted { log.G(ctx).Infof("Path %s is already mounted, skipping mount", npath) @@ -691,6 +704,29 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if err != nil { return fmt.Errorf("failed to prepare LVM directory for snapshot: %w", err) } + + if privateImageOk { + var parentID string + parentID, err = storage.GetID(ctx, parent) + if err != nil { + return fmt.Errorf("failed to get parent ID for private image: %w", err) + } + parent_upperdir := o.upperPath(parentID) + // copy all contents from parent upperdir to new snapshot upperdir + // TODO: maybe move instead of copy? + opt := cp.Options{ + OnSymlink: func(src string) cp.SymlinkAction { + return cp.Shallow + }, + PreserveTimes: true, + PreserveOwner: true, + } + if err = cp.Copy(parent_upperdir, filepath.Join(td, "fs"), opt); err != nil { + return fmt.Errorf("failed to copy parent upperdir to new snapshot upperdir: %w, from %s to %s", err, parent_upperdir, td) + } + fmt.Println("Copied parent upperdir to new snapshot upperdir:", td) + } + fmt.Println("Prepared LVM directory for snapshot:", td, "with logical volume name:", lvName) storage.SetDevboxContent(ctx, key, contentId, lvName, npath) if err != nil { @@ -706,13 +742,14 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k } if len(s.ParentIDs) > 0 { - st, err := os.Stat(o.upperPath(s.ParentIDs[0])) + var st os.FileInfo + st, err = os.Stat(o.upperPath(s.ParentIDs[0])) if err != nil { return fmt.Errorf("failed to stat parent: %w", err) } stat := st.Sys().(*syscall.Stat_t) - if err := os.Lchown(filepath.Join(td, "fs"), int(stat.Uid), int(stat.Gid)); err != nil { + if err = os.Lchown(filepath.Join(td, "fs"), int(stat.Uid), int(stat.Gid)); err != nil { return fmt.Errorf("failed to chown: %w", err) } } diff --git a/snapshots/devbox/plugin/plugin.go b/snapshots/devbox/plugin/plugin.go index a9d56a39e078..c88722f8e0e4 100644 --- a/snapshots/devbox/plugin/plugin.go +++ b/snapshots/devbox/plugin/plugin.go @@ -70,7 +70,7 @@ func init() { if config.lvmVgName == "" { // If no LVM VG name is provided, use the default. 
- config.lvmVgName = "ubuntu-vg" + config.lvmVgName = "devbox-lvm-vg" } oOpts = append(oOpts, devbox.WithLvmVgName(config.lvmVgName)) diff --git a/snapshots/devbox/storage/bolt.go b/snapshots/devbox/storage/bolt.go index 8d6ccd379d6b..0937039301c6 100644 --- a/snapshots/devbox/storage/bolt.go +++ b/snapshots/devbox/storage/bolt.go @@ -702,17 +702,19 @@ func GetDevboxLvName(ctx context.Context, contentKey string) (string, error) { return "", fmt.Errorf("content key cannot be empty") } - err := withDevboxBucket(ctx, func(ctx context.Context, _ *bolt.Bucket, dbkt *bolt.Bucket) error { - fmt.Printf("devbox storage path bucket1\n") + err := withDevboxBucket(ctx, func(ctx context.Context, bkt *bolt.Bucket, dbkt *bolt.Bucket) error { if dbkt == nil { - return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) + return fmt.Errorf("devbox contentID bucket does not exist: %w", errdefs.ErrNotFound) } sdbkt := dbkt.Bucket([]byte(contentKey)) - fmt.Printf("devbox storage path bucket2: %s\n", contentKey) if sdbkt == nil { return errdefs.ErrNotFound } + if mountPath := sdbkt.Get(DevboxKeyPath); mountPath != nil { + return fmt.Errorf("devbox lv is already mounted at %s: %w", string(mountPath), errdefs.ErrAlreadyExists) + } + lvNameByte := sdbkt.Get(DevboxKeyLvName) // if len(lvName) == 0 { // no need for this check, as snapshotter will recreate the LVM if lvName is empty // return fmt.Errorf("LVM name for content key %s not found: %w", contentKey, errdefs.ErrNotFound) @@ -728,8 +730,8 @@ func GetDevboxLvName(ctx context.Context, contentKey string) (string, error) { return lvName, nil } -func SetDevboxContent(ctx context.Context, key, contentKey, lvName, path string) error { - if contentKey == "" || lvName == "" || path == "" { +func SetDevboxContent(ctx context.Context, key, contentID, lvName, path string) error { + if contentID == "" || lvName == "" || path == "" { return fmt.Errorf("content key and storage path cannot be empty") } @@ -739,44 +741,44 @@ func SetDevboxContent(ctx context.Context, key, contentKey, lvName, path string) return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) } fmt.Printf("devbox storage path bucket: %s\n", key) - err := bkt.Put(DevboxKeyContentID, []byte(contentKey)) + err := bkt.Put(DevboxKeyContentID, []byte(contentID)) if err != nil { return fmt.Errorf("failed to set content ID for key %s: %w", key, err) } if dbkt == nil { return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) } - sdbkt, err := dbkt.CreateBucketIfNotExists([]byte(contentKey)) + sdbkt, err := dbkt.CreateBucketIfNotExists([]byte(contentID)) if err != nil { - return fmt.Errorf("failed to create bucket for content key %s: %w", contentKey, err) + return fmt.Errorf("failed to create bucket for content key %s: %w", contentID, err) } if err := sdbkt.Put([]byte(DevboxKeyLvName), []byte(lvName)); err != nil { - return fmt.Errorf("failed to set storage path for content key %s: %w", contentKey, err) + return fmt.Errorf("failed to set storage path for content key %s: %w", contentID, err) } if err := sdbkt.Put([]byte(DevboxKeyPath), []byte(path)); err != nil { - return fmt.Errorf("failed to set storage path for content key %s: %w", contentKey, err) + return fmt.Errorf("failed to set storage path for content key %s: %w", contentID, err) } if err := sdbkt.Put([]byte(DevboxKeyStatus), []byte(DevboxStatusActive)); err != nil { - return fmt.Errorf("failed to set status for content key %s: %w", contentKey, err) + return 
fmt.Errorf("failed to set status for content key %s: %w", contentID, err) } return nil }) } -func SetDevboxContentStatusRemove(ctx context.Context, contentKey string) error { - if contentKey == "" { +func SetDevboxContentStatusRemove(ctx context.Context, contentID string) error { + if contentID == "" { return fmt.Errorf("content key cannot be empty") } return withDevboxBucket(ctx, func(ctx context.Context, _ *bolt.Bucket, dbkt *bolt.Bucket) error { - sdbkt := dbkt.Bucket([]byte(contentKey)) + sdbkt := dbkt.Bucket([]byte(contentID)) if sdbkt == nil { - return fmt.Errorf("devbox storage path bucket for content key %s does not exist: %w", contentKey, errdefs.ErrNotFound) + return fmt.Errorf("devbox storage path bucket for content key %s does not exist: %w", contentID, errdefs.ErrNotFound) } if err := sdbkt.Put(DevboxKeyStatus, DevboxStatusRemoved); err != nil { - return fmt.Errorf("failed to set status for content key %s: %w", contentKey, err) + return fmt.Errorf("failed to set status for content key %s: %w", contentID, err) } - fmt.Printf("Set devbox content status for key: %s, status: %s\n", contentKey, DevboxStatusRemoved) + fmt.Printf("Set devbox content status for key: %s, status: %s\n", contentID, DevboxStatusRemoved) return nil }) } @@ -804,20 +806,25 @@ func RemoveDevboxContent(ctx context.Context, Key string) (string, error) { if contentID == nil { // return fmt.Errorf("content ID for key %s not found: %w", Key, errdefs.ErrNotFound) } - if err := bkt.DeleteBucket([]byte(Key)); err != nil { - if errors.Is(err, bolt.ErrBucketNotFound) { - return fmt.Errorf("storage path for content key %s not found: %w", Key, errdefs.ErrNotFound) - } - return fmt.Errorf("failed to delete storage path for content key %s: %w", Key, err) - } + // if err := bkt.DeleteBucket([]byte(Key)); err != nil { + // if errors.Is(err, bolt.ErrBucketNotFound) { + // return fmt.Errorf("storage path for content key %s not found: %w", Key, errdefs.ErrNotFound) + // } + // return fmt.Errorf("failed to delete storage path for content key %s: %w", Key, err) + // } sdbkt := dbkt.Bucket([]byte(contentID)) if sdbkt == nil { return fmt.Errorf("devbox storage path bucket for content ID %s does not exist: %w", string(contentID), errdefs.ErrNotFound) } - if status := sdbkt.Get(DevboxKeyStatus); status != nil && string(status) == string(DevboxStatusRemoved) { - dbkt.Delete([]byte(contentID)) - fmt.Printf("Removed devbox content for key: %s, content ID: %s\n", Key, string(contentID)) - return nil + if status := sdbkt.Get(DevboxKeyStatus); status != nil { + if string(status) == string(DevboxStatusRemoved) { + // remove the bucket if it is already marked as removed + dbkt.DeleteBucket([]byte(contentID)) + fmt.Printf("Removed devbox content for key: %s, content ID: %s\n", Key, string(contentID)) + } else { + // if the status is not removed, only remove the mount path + sdbkt.Delete([]byte(DevboxKeyPath)) + } } return nil }) @@ -847,3 +854,39 @@ func GetDevboxLvNames(ctx context.Context) (map[string]string, error) { return m, nil } + +func GetParentID(ctx context.Context, key string) (string, error) { + var ( + id string + ) + err := withSnapshotBucket(ctx, key, func(ctx context.Context, bkt, _ *bolt.Bucket) error { + if bkt == nil { + return fmt.Errorf("snapshot %v: %w", key, errdefs.ErrNotFound) + } + id = string(bkt.Get(bucketKeyParent)) + return nil + }) + if err != nil { + return "", err + } + + return id, nil +} + +func GetID(ctx context.Context, key string) (string, error) { + var ( + id string + ) + err := withSnapshotBucket(ctx, 
key, func(ctx context.Context, bkt, _ *bolt.Bucket) error { + if bkt == nil { + return fmt.Errorf("snapshot %v: %w", key, errdefs.ErrNotFound) + } + id = fmt.Sprintf("%d", readID(bkt)) + return nil + }) + if err != nil { + return "", err + } + + return id, nil +} diff --git a/vendor/github.com/otiai10/copy/.gitignore b/vendor/github.com/otiai10/copy/.gitignore new file mode 100644 index 000000000000..a79348558690 --- /dev/null +++ b/vendor/github.com/otiai10/copy/.gitignore @@ -0,0 +1,9 @@ +test/data.copy +test/owned-by-root +coverage.txt +vendor +.vagrant +.idea/ + +# Test Specific +test/data/case16/large.file diff --git a/vendor/github.com/otiai10/copy/LICENSE b/vendor/github.com/otiai10/copy/LICENSE new file mode 100644 index 000000000000..1f0cc5dec70f --- /dev/null +++ b/vendor/github.com/otiai10/copy/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 otiai10 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
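
Editorial note (not part of the patch series): the otiai10/copy dependency vendored here is what the devbox snapshotter's `createSnapshot` change uses to clone a parent snapshot's upperdir into a new snapshot when `privateImageOk` is set, with `OnSymlink: Shallow`, `PreserveTimes`, and `PreserveOwner`, as shown in the hunk above. The sketch below restates that usage in isolation; the paths are hypothetical placeholders, not values taken from the patch.

```go
// Minimal sketch of how the snapshotter drives github.com/otiai10/copy:
// clone the parent upperdir into the new snapshot's "fs" directory,
// recreating symlinks shallowly and keeping times/ownership intact.
package main

import (
	"fmt"
	"path/filepath"

	cp "github.com/otiai10/copy"
)

func main() {
	parentUpper := "/var/lib/containerd/devbox/snapshots/41/fs"  // hypothetical parent upperdir
	newSnapshot := "/var/lib/containerd/devbox/snapshots/new-42" // hypothetical temp dir (td)

	opt := cp.Options{
		// Copy symlinks as symlinks instead of following them.
		OnSymlink: func(src string) cp.SymlinkAction { return cp.Shallow },
		// Preserve mtime/atime and uid/gid so the cloned rootfs matches the parent.
		PreserveTimes: true,
		PreserveOwner: true,
	}
	if err := cp.Copy(parentUpper, filepath.Join(newSnapshot, "fs"), opt); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println("cloned parent upperdir into", filepath.Join(newSnapshot, "fs"))
}
```
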
diff --git a/vendor/github.com/otiai10/copy/README.md b/vendor/github.com/otiai10/copy/README.md new file mode 100644 index 000000000000..9f3dce74da06 --- /dev/null +++ b/vendor/github.com/otiai10/copy/README.md @@ -0,0 +1,127 @@ +# copy + +[![Go Reference](https://pkg.go.dev/badge/github.com/otiai10/copy.svg)](https://pkg.go.dev/github.com/otiai10/copy) +[![Actions Status](https://github.com/otiai10/copy/workflows/Go/badge.svg)](https://github.com/otiai10/copy/actions) +[![codecov](https://codecov.io/gh/otiai10/copy/branch/main/graph/badge.svg)](https://codecov.io/gh/otiai10/copy) +[![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://github.com/otiai10/copy/blob/main/LICENSE) +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fotiai10%2Fcopy.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fotiai10%2Fcopy?ref=badge_shield) +[![CodeQL](https://github.com/otiai10/copy/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/otiai10/copy/actions/workflows/codeql-analysis.yml) +[![Go Report Card](https://goreportcard.com/badge/github.com/otiai10/copy)](https://goreportcard.com/report/github.com/otiai10/copy) +[![GitHub tag (latest SemVer)](https://img.shields.io/github/v/tag/otiai10/copy?sort=semver)](https://pkg.go.dev/github.com/otiai10/copy) +[![Docker Test](https://github.com/otiai10/copy/actions/workflows/docker-test.yml/badge.svg)](https://github.com/otiai10/copy/actions/workflows/docker-test.yml) +[![Vagrant Test](https://github.com/otiai10/copy/actions/workflows/vagrant-test.yml/badge.svg)](https://github.com/otiai10/copy/actions/workflows/vagrant-test.yml) +[![GopherJS](https://github.com/otiai10/copy/actions/workflows/gopherjs.yml/badge.svg)](https://github.com/otiai10/copy/actions/workflows/gopherjs.yml) +[![Go WASM](https://github.com/otiai10/copy/actions/workflows/wasm.yml/badge.svg)](https://github.com/otiai10/copy/actions/workflows/wasm.yml) + +`copy` copies directories recursively. + +# Example Usage + +```go +package main + +import ( + "fmt" + cp "github.com/otiai10/copy" +) + +func main() { + err := cp.Copy("your/src", "your/dest") + fmt.Println(err) // nil +} +``` + +# Advanced Usage + +```go +// Options specifies optional actions on copying. +type Options struct { + + // OnSymlink can specify what to do on symlink + OnSymlink func(src string) SymlinkAction + + // OnDirExists can specify what to do when there is a directory already existing in destination. + OnDirExists func(src, dest string) DirExistsAction + + // OnError can let users decide how to handle errors (e.g., you can suppress specific error). + OnError func(src, dest, string, err error) error + + // Skip can specify which files should be skipped + Skip func(srcinfo os.FileInfo, src, dest string) (bool, error) + + // RenameDestination can rename destination. + // If not set, nil, it does nothing. + RenameDestination func(src, dest string) (string, error) + + // PermissionControl can control permission of + // every entry. + // When you want to add permission 0222, do like + // + // PermissionControl = AddPermission(0222) + // + // or if you even don't want to touch permission, + // + // PermissionControl = DoNothing + // + // By default, PermissionControl = PreservePermission + PermissionControl PermissionControlFunc + + // Sync file after copy. 
+ // Useful in case when file must be on the disk + // (in case crash happens, for example), + // at the expense of some performance penalty + Sync bool + + // Preserve the atime and the mtime of the entries + // On linux we can preserve only up to 1 millisecond accuracy + PreserveTimes bool + + // Preserve the uid and the gid of all entries. + PreserveOwner bool + + // The byte size of the buffer to use for copying files. + // If zero, the internal default buffer of 32KB is used. + // See https://golang.org/pkg/io/#CopyBuffer for more information. + CopyBufferSize uint + + // If you want to add some limitation on reading src file, + // you can wrap the src and provide new reader, + // such as `RateLimitReader` in the test case. + WrapReader func(src io.Reader) io.Reader + + // If given, copy.Copy refers to this fs.FS instead of the OS filesystem. + // e.g., You can use embed.FS to copy files from embedded filesystem. + FS fs.FS + + // NumOfWorkers represents the number of workers used for + // concurrent copying contents of directories. + // If 0 or 1, it does not use goroutine for copying directories. + // Please refer to https://pkg.go.dev/golang.org/x/sync/semaphore for more details. + NumOfWorkers int64 + + // PreferConcurrent is a function to determine whether or not + // to use goroutine for copying contents of directories. + // If PreferConcurrent is nil, which is default, it does concurrent + // copying for all directories. + // If NumOfWorkers is 0 or 1, this function will be ignored. + PreferConcurrent func(srcdir, destdir string) (bool, error) +} +``` + +```go +// For example... +opt := Options{ + Skip: func(info os.FileInfo, src, dest string) (bool, error) { + return strings.HasSuffix(src, ".git"), nil + }, +} +err := Copy("your/directory", "your/directory.copy", opt) +``` + +# Issues + +- https://github.com/otiai10/copy/issues + + +## License +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fotiai10%2Fcopy.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fotiai10%2Fcopy?ref=badge_large) diff --git a/vendor/github.com/otiai10/copy/copy.go b/vendor/github.com/otiai10/copy/copy.go new file mode 100644 index 000000000000..f9787cd9cf71 --- /dev/null +++ b/vendor/github.com/otiai10/copy/copy.go @@ -0,0 +1,352 @@ +package copy + +import ( + "context" + "io" + "io/fs" + "os" + "path/filepath" + "time" + + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +type timespec struct { + Mtime time.Time + Atime time.Time + Ctime time.Time +} + +// Copy copies src to dest, doesn't matter if src is a directory or a file. +func Copy(src, dest string, opts ...Options) error { + opt := assureOptions(src, dest, opts...) + if opt.NumOfWorkers > 1 { + opt.intent.sem = semaphore.NewWeighted(opt.NumOfWorkers) + opt.intent.ctx = context.Background() + } + if opt.FS != nil { + info, err := fs.Stat(opt.FS, src) + if err != nil { + return onError(src, dest, err, opt) + } + return switchboard(src, dest, info, opt) + } + info, err := os.Lstat(src) + if err != nil { + return onError(src, dest, err, opt) + } + return switchboard(src, dest, info, opt) +} + +// switchboard switches proper copy functions regarding file type, etc... +// If there would be anything else here, add a case to this switchboard. 
+func switchboard(src, dest string, info os.FileInfo, opt Options) (err error) { + if info.Mode()&os.ModeDevice != 0 && !opt.Specials { + return onError(src, dest, err, opt) + } + + if opt.RenameDestination != nil { + if dest, err = opt.RenameDestination(src, dest); err != nil { + return onError(src, dest, err, opt) + } + } + + switch { + case info.Mode()&os.ModeSymlink != 0: + err = onsymlink(src, dest, opt) + case info.IsDir(): + err = dcopy(src, dest, info, opt) + case info.Mode()&os.ModeNamedPipe != 0: + err = pcopy(dest, info) + default: + err = fcopy(src, dest, info, opt) + } + + return onError(src, dest, err, opt) +} + +// copyNextOrSkip decide if this src should be copied or not. +// Because this "copy" could be called recursively, +// "info" MUST be given here, NOT nil. +func copyNextOrSkip(src, dest string, info os.FileInfo, opt Options) error { + if opt.Skip != nil { + skip, err := opt.Skip(info, src, dest) + if err != nil { + return err + } + if skip { + return nil + } + } + return switchboard(src, dest, info, opt) +} + +// fcopy is for just a file, +// with considering existence of parent directory +// and file permission. +func fcopy(src, dest string, info os.FileInfo, opt Options) (err error) { + + var readcloser io.ReadCloser + if opt.FS != nil { + readcloser, err = opt.FS.Open(src) + } else { + readcloser, err = os.Open(src) + } + if err != nil { + if os.IsNotExist(err) { + return nil + } + return + } + defer fclose(readcloser, &err) + + if err = os.MkdirAll(filepath.Dir(dest), os.ModePerm); err != nil { + return + } + + f, err := os.Create(dest) + if err != nil { + return + } + defer fclose(f, &err) + + chmodfunc, err := opt.PermissionControl(info, dest) + if err != nil { + return err + } + chmodfunc(&err) + + var buf []byte = nil + var w io.Writer = f + var r io.Reader = readcloser + + if opt.WrapReader != nil { + r = opt.WrapReader(r) + } + + if opt.CopyBufferSize != 0 { + buf = make([]byte, opt.CopyBufferSize) + // Disable using `ReadFrom` by io.CopyBuffer. + // See https://github.com/otiai10/copy/pull/60#discussion_r627320811 for more details. + w = struct{ io.Writer }{f} + // r = struct{ io.Reader }{s} + } + + if _, err = io.CopyBuffer(w, r, buf); err != nil { + return err + } + + if opt.Sync { + err = f.Sync() + } + + if opt.PreserveOwner { + if err := preserveOwner(src, dest, info); err != nil { + return err + } + } + if opt.PreserveTimes { + if err := preserveTimes(info, dest); err != nil { + return err + } + } + + return +} + +// dcopy is for a directory, +// with scanning contents inside the directory +// and pass everything to "copy" recursively. +func dcopy(srcdir, destdir string, info os.FileInfo, opt Options) (err error) { + if skip, err := onDirExists(opt, srcdir, destdir); err != nil { + return err + } else if skip { + return nil + } + + // Make dest dir with 0755 so that everything writable. 
+ chmodfunc, err := opt.PermissionControl(info, destdir) + if err != nil { + return err + } + defer chmodfunc(&err) + + var entries []fs.DirEntry + if opt.FS != nil { + entries, err = fs.ReadDir(opt.FS, srcdir) + if err != nil { + return err + } + } else { + entries, err = os.ReadDir(srcdir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + } + + contents := make([]fs.FileInfo, 0, len(entries)) + for _, e := range entries { + info, err := e.Info() + if err != nil { + return err + } + contents = append(contents, info) + } + + if yes, err := shouldCopyDirectoryConcurrent(opt, srcdir, destdir); err != nil { + return err + } else if yes { + if err := dcopyConcurrent(srcdir, destdir, contents, opt); err != nil { + return err + } + } else { + if err := dcopySequential(srcdir, destdir, contents, opt); err != nil { + return err + } + } + + if opt.PreserveTimes { + if err := preserveTimes(info, destdir); err != nil { + return err + } + } + + if opt.PreserveOwner { + if err := preserveOwner(srcdir, destdir, info); err != nil { + return err + } + } + + return +} + +func dcopySequential(srcdir, destdir string, contents []os.FileInfo, opt Options) error { + for _, content := range contents { + cs, cd := filepath.Join(srcdir, content.Name()), filepath.Join(destdir, content.Name()) + + if err := copyNextOrSkip(cs, cd, content, opt); err != nil { + // If any error, exit immediately + return err + } + } + return nil +} + +// Copy this directory concurrently regarding semaphore of opt.intent +func dcopyConcurrent(srcdir, destdir string, contents []os.FileInfo, opt Options) error { + group, ctx := errgroup.WithContext(opt.intent.ctx) + getRoutine := func(cs, cd string, content os.FileInfo) func() error { + return func() error { + if content.IsDir() { + return copyNextOrSkip(cs, cd, content, opt) + } + if err := opt.intent.sem.Acquire(ctx, 1); err != nil { + return err + } + err := copyNextOrSkip(cs, cd, content, opt) + opt.intent.sem.Release(1) + return err + } + } + for _, content := range contents { + csd := filepath.Join(srcdir, content.Name()) + cdd := filepath.Join(destdir, content.Name()) + group.Go(getRoutine(csd, cdd, content)) + } + return group.Wait() +} + +func onDirExists(opt Options, srcdir, destdir string) (bool, error) { + _, err := os.Stat(destdir) + if err == nil && opt.OnDirExists != nil && destdir != opt.intent.dest { + switch opt.OnDirExists(srcdir, destdir) { + case Replace: + if err := os.RemoveAll(destdir); err != nil { + return false, err + } + case Untouchable: + return true, nil + } // case "Merge" is default behaviour. Go through. + } else if err != nil && !os.IsNotExist(err) { + return true, err // Unwelcome error type...! + } + return false, nil +} + +func onsymlink(src, dest string, opt Options) error { + switch opt.OnSymlink(src) { + case Shallow: + if err := lcopy(src, dest); err != nil { + return err + } + if opt.PreserveTimes { + return preserveLtimes(src, dest) + } + return nil + case Deep: + orig, err := os.Readlink(src) + if err != nil { + return err + } + if !filepath.IsAbs(orig) { + // orig is a relative link: need to add src dir to orig + orig = filepath.Join(filepath.Dir(src), orig) + } + info, err := os.Lstat(orig) + if err != nil { + return err + } + return copyNextOrSkip(orig, dest, info, opt) + case Skip: + fallthrough + default: + return nil // do nothing + } +} + +// lcopy is for a symlink, +// with just creating a new symlink by replicating src symlink. 
+func lcopy(src, dest string) error { + orig, err := os.Readlink(src) + // @See https://github.com/otiai10/copy/issues/111 + // TODO: This might be controlled by Options in the future. + if err != nil { + if os.IsNotExist(err) { // Copy symlink even if not existing + return os.Symlink(src, dest) + } + return err + } + + // @See https://github.com/otiai10/copy/issues/132 + // TODO: Control by SymlinkExistsAction + if _, err := os.Lstat(dest); err == nil { + if err := os.Remove(dest); err != nil { + return err + } + } + + return os.Symlink(orig, dest) +} + +// fclose ANYHOW closes file, +// with assigning error raised during Close, +// BUT respecting the error already reported. +func fclose(f io.Closer, reported *error) { + if err := f.Close(); *reported == nil { + *reported = err + } +} + +// onError lets caller to handle errors +// occurred when copying a file. +func onError(src, dest string, err error, opt Options) error { + if opt.OnError == nil { + return err + } + + return opt.OnError(src, dest, err) +} diff --git a/vendor/github.com/otiai10/copy/copy_namedpipes.go b/vendor/github.com/otiai10/copy/copy_namedpipes.go new file mode 100644 index 000000000000..657fb3812501 --- /dev/null +++ b/vendor/github.com/otiai10/copy/copy_namedpipes.go @@ -0,0 +1,17 @@ +//go:build !windows && !plan9 && !netbsd && !aix && !illumos && !solaris && !js + +package copy + +import ( + "os" + "path/filepath" + "syscall" +) + +// pcopy is for just named pipes +func pcopy(dest string, info os.FileInfo) error { + if err := os.MkdirAll(filepath.Dir(dest), os.ModePerm); err != nil { + return err + } + return syscall.Mkfifo(dest, uint32(info.Mode())) +} diff --git a/vendor/github.com/otiai10/copy/copy_namedpipes_x.go b/vendor/github.com/otiai10/copy/copy_namedpipes_x.go new file mode 100644 index 000000000000..da3d6f79678f --- /dev/null +++ b/vendor/github.com/otiai10/copy/copy_namedpipes_x.go @@ -0,0 +1,14 @@ +//go:build windows || plan9 || netbsd || aix || illumos || solaris || js + +package copy + +import ( + "os" +) + +// TODO: check plan9 netbsd aix illumos solaris in future + +// pcopy is for just named pipes. Windows doesn't support them +func pcopy(dest string, info os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/otiai10/copy/options.go b/vendor/github.com/otiai10/copy/options.go new file mode 100644 index 000000000000..c1db48c8cfe4 --- /dev/null +++ b/vendor/github.com/otiai10/copy/options.go @@ -0,0 +1,174 @@ +package copy + +import ( + "context" + "io" + "io/fs" + "os" + + "golang.org/x/sync/semaphore" +) + +// Options specifies optional actions on copying. +type Options struct { + + // OnSymlink can specify what to do on symlink + OnSymlink func(src string) SymlinkAction + + // OnDirExists can specify what to do when there is a directory already existing in destination. + OnDirExists func(src, dest string) DirExistsAction + + // OnErr lets called decide whether or not to continue on particular copy error. + OnError func(src, dest string, err error) error + + // Skip can specify which files should be skipped + Skip func(srcinfo os.FileInfo, src, dest string) (bool, error) + + // RenameDestination can specify the destination file or dir name if needed to rename. + RenameDestination func(src, dest string) (string, error) + + // Specials includes special files to be copied. default false. 
+ Specials bool + + // AddPermission to every entities, + // NO MORE THAN 0777 + // @OBSOLETE + // Use `PermissionControl = AddPermission(perm)` instead + AddPermission os.FileMode + + // PermissionControl can preserve or even add permission to + // every entries, for example + // + // opt.PermissionControl = AddPermission(0222) + // + // See permission_control.go for more detail. + PermissionControl PermissionControlFunc + + // Sync file after copy. + // Useful in case when file must be on the disk + // (in case crash happens, for example), + // at the expense of some performance penalty + Sync bool + + // Preserve the atime and the mtime of the entries. + // On linux we can preserve only up to 1 millisecond accuracy. + PreserveTimes bool + + // Preserve the uid and the gid of all entries. + PreserveOwner bool + + // The byte size of the buffer to use for copying files. + // If zero, the internal default buffer of 32KB is used. + // See https://golang.org/pkg/io/#CopyBuffer for more information. + CopyBufferSize uint + + // If you want to add some limitation on reading src file, + // you can wrap the src and provide new reader, + // such as `RateLimitReader` in the test case. + WrapReader func(src io.Reader) io.Reader + + // If given, copy.Copy refers to this fs.FS instead of the OS filesystem. + // e.g., You can use embed.FS to copy files from embedded filesystem. + FS fs.FS + + // NumOfWorkers represents the number of workers used for + // concurrent copying contents of directories. + // If 0 or 1, it does not use goroutine for copying directories. + // Please refer to https://pkg.go.dev/golang.org/x/sync/semaphore for more details. + NumOfWorkers int64 + + // PreferConcurrent is a function to determine whether or not + // to use goroutine for copying contents of directories. + // If PreferConcurrent is nil, which is default, it does concurrent + // copying for all directories. + // If NumOfWorkers is 0 or 1, this function will be ignored. + PreferConcurrent func(srcdir, destdir string) (bool, error) + + // Internal use only + intent intent +} + +type intent struct { + src string + dest string + sem *semaphore.Weighted + ctx context.Context +} + +// SymlinkAction represents what to do on symlink. +type SymlinkAction int + +const ( + // Deep creates hard-copy of contents. + Deep SymlinkAction = iota + // Shallow creates new symlink to the dest of symlink. + Shallow + // Skip does nothing with symlink. + Skip +) + +// DirExistsAction represents what to do on dest dir. +type DirExistsAction int + +const ( + // Merge preserves or overwrites existing files under the dir (default behavior). + Merge DirExistsAction = iota + // Replace deletes all contents under the dir and copy src files. + Replace + // Untouchable does nothing for the dir, and leaves it as it is. + Untouchable +) + +// getDefaultOptions provides default options, +// which would be modified by usage-side. +func getDefaultOptions(src, dest string) Options { + return Options{ + OnSymlink: func(string) SymlinkAction { + return Shallow // Do shallow copy + }, + OnDirExists: nil, // Default behavior is "Merge". 
+ OnError: nil, // Default is "accept error" + Skip: nil, // Do not skip anything + AddPermission: 0, // Add nothing + PermissionControl: PerservePermission, // Just preserve permission + Sync: false, // Do not sync + Specials: false, // Do not copy special files + PreserveTimes: false, // Do not preserve the modification time + CopyBufferSize: 0, // Do not specify, use default bufsize (32*1024) + WrapReader: nil, // Do not wrap src files, use them as they are. + intent: intent{src, dest, nil, nil}, + } +} + +// assureOptions struct, should be called only once. +// All optional values MUST NOT BE nil/zero after assured. +func assureOptions(src, dest string, opts ...Options) Options { + defopt := getDefaultOptions(src, dest) + if len(opts) == 0 { + return defopt + } + if opts[0].OnSymlink == nil { + opts[0].OnSymlink = defopt.OnSymlink + } + if opts[0].Skip == nil { + opts[0].Skip = defopt.Skip + } + if opts[0].AddPermission > 0 { + opts[0].PermissionControl = AddPermission(opts[0].AddPermission) + } else if opts[0].PermissionControl == nil { + opts[0].PermissionControl = PerservePermission + } + opts[0].intent.src = defopt.intent.src + opts[0].intent.dest = defopt.intent.dest + return opts[0] +} + +func shouldCopyDirectoryConcurrent(opt Options, srcdir, destdir string) (bool, error) { + if opt.NumOfWorkers <= 1 { + return false, nil + } + if opt.PreferConcurrent == nil { + return true, nil + } + return opt.PreferConcurrent(srcdir, destdir) +} diff --git a/vendor/github.com/otiai10/copy/permission_control.go b/vendor/github.com/otiai10/copy/permission_control.go new file mode 100644 index 000000000000..901a84514eab --- /dev/null +++ b/vendor/github.com/otiai10/copy/permission_control.go @@ -0,0 +1,49 @@ +package copy + +import ( + "io/fs" + "os" +) + +const ( + // tmpPermissionForDirectory makes the destination directory writable, + // so that stuff can be copied recursively even if any original directory is NOT writable. + // See https://github.com/otiai10/copy/pull/9 for more information. + tmpPermissionForDirectory = os.FileMode(0755) +) + +type PermissionControlFunc func(srcinfo fs.FileInfo, dest string) (chmodfunc func(*error), err error) + +var ( + AddPermission = func(perm os.FileMode) PermissionControlFunc { + return func(srcinfo fs.FileInfo, dest string) (func(*error), error) { + orig := srcinfo.Mode() + if srcinfo.IsDir() { + if err := os.MkdirAll(dest, tmpPermissionForDirectory); err != nil { + return func(*error) {}, err + } + } + return func(err *error) { + chmod(dest, orig|perm, err) + }, nil + } + } + PerservePermission PermissionControlFunc = AddPermission(0) + DoNothing PermissionControlFunc = func(srcinfo fs.FileInfo, dest string) (func(*error), error) { + if srcinfo.IsDir() { + if err := os.MkdirAll(dest, srcinfo.Mode()); err != nil { + return func(*error) {}, err + } + } + return func(*error) {}, nil + } +) + +// chmod ANYHOW changes file mode, +// with assigning error raised during Chmod, +// BUT respecting the error already reported. 
+func chmod(dir string, mode os.FileMode, reported *error) { + if err := os.Chmod(dir, mode); *reported == nil { + *reported = err + } +} diff --git a/vendor/github.com/otiai10/copy/preserve_ltimes.go b/vendor/github.com/otiai10/copy/preserve_ltimes.go new file mode 100644 index 000000000000..6b6787b2afff --- /dev/null +++ b/vendor/github.com/otiai10/copy/preserve_ltimes.go @@ -0,0 +1,19 @@ +//go:build !windows && !plan9 && !js + +package copy + +import ( + "golang.org/x/sys/unix" +) + +func preserveLtimes(src, dest string) error { + info := new(unix.Stat_t) + if err := unix.Lstat(src, info); err != nil { + return err + } + + return unix.Lutimes(dest, []unix.Timeval{ + unix.NsecToTimeval(info.Atim.Nano()), + unix.NsecToTimeval(info.Mtim.Nano()), + }) +} diff --git a/vendor/github.com/otiai10/copy/preserve_ltimes_x.go b/vendor/github.com/otiai10/copy/preserve_ltimes_x.go new file mode 100644 index 000000000000..5ef234d56335 --- /dev/null +++ b/vendor/github.com/otiai10/copy/preserve_ltimes_x.go @@ -0,0 +1,7 @@ +//go:build windows || js || plan9 + +package copy + +func preserveLtimes(src, dest string) error { + return nil // Unsupported +} diff --git a/vendor/github.com/otiai10/copy/preserve_owner.go b/vendor/github.com/otiai10/copy/preserve_owner.go new file mode 100644 index 000000000000..bd129644f98f --- /dev/null +++ b/vendor/github.com/otiai10/copy/preserve_owner.go @@ -0,0 +1,23 @@ +//go:build !windows && !plan9 + +package copy + +import ( + "io/fs" + "os" + "syscall" +) + +func preserveOwner(src, dest string, info fs.FileInfo) (err error) { + if info == nil { + if info, err = os.Stat(src); err != nil { + return err + } + } + if stat, ok := info.Sys().(*syscall.Stat_t); ok { + if err := os.Chown(dest, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/otiai10/copy/preserve_owner_x.go b/vendor/github.com/otiai10/copy/preserve_owner_x.go new file mode 100644 index 000000000000..1e8f1251da64 --- /dev/null +++ b/vendor/github.com/otiai10/copy/preserve_owner_x.go @@ -0,0 +1,9 @@ +//go:build windows || plan9 + +package copy + +import "io/fs" + +func preserveOwner(src, dest string, info fs.FileInfo) (err error) { + return nil +} diff --git a/vendor/github.com/otiai10/copy/preserve_times.go b/vendor/github.com/otiai10/copy/preserve_times.go new file mode 100644 index 000000000000..d89b12898002 --- /dev/null +++ b/vendor/github.com/otiai10/copy/preserve_times.go @@ -0,0 +1,11 @@ +package copy + +import "os" + +func preserveTimes(srcinfo os.FileInfo, dest string) error { + spec := getTimeSpec(srcinfo) + if err := os.Chtimes(dest, spec.Atime, spec.Mtime); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/otiai10/copy/stat_times.go b/vendor/github.com/otiai10/copy/stat_times.go new file mode 100644 index 000000000000..49ea67c27e6c --- /dev/null +++ b/vendor/github.com/otiai10/copy/stat_times.go @@ -0,0 +1,21 @@ +//go:build !windows && !darwin && !freebsd && !plan9 && !netbsd && !js + +// TODO: add more runtimes + +package copy + +import ( + "os" + "syscall" + "time" +) + +func getTimeSpec(info os.FileInfo) timespec { + stat := info.Sys().(*syscall.Stat_t) + times := timespec{ + Mtime: info.ModTime(), + Atime: time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)), + Ctime: time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)), + } + return times +} diff --git a/vendor/github.com/otiai10/copy/stat_times_darwin.go b/vendor/github.com/otiai10/copy/stat_times_darwin.go new file mode 100644 index 
000000000000..935ce1d79813 --- /dev/null +++ b/vendor/github.com/otiai10/copy/stat_times_darwin.go @@ -0,0 +1,19 @@ +//go:build darwin + +package copy + +import ( + "os" + "syscall" + "time" +) + +func getTimeSpec(info os.FileInfo) timespec { + stat := info.Sys().(*syscall.Stat_t) + times := timespec{ + Mtime: info.ModTime(), + Atime: time.Unix(stat.Atimespec.Sec, stat.Atimespec.Nsec), + Ctime: time.Unix(stat.Ctimespec.Sec, stat.Ctimespec.Nsec), + } + return times +} diff --git a/vendor/github.com/otiai10/copy/stat_times_freebsd.go b/vendor/github.com/otiai10/copy/stat_times_freebsd.go new file mode 100644 index 000000000000..1deb1cc4eb8c --- /dev/null +++ b/vendor/github.com/otiai10/copy/stat_times_freebsd.go @@ -0,0 +1,19 @@ +//go:build freebsd + +package copy + +import ( + "os" + "syscall" + "time" +) + +func getTimeSpec(info os.FileInfo) timespec { + stat := info.Sys().(*syscall.Stat_t) + times := timespec{ + Mtime: info.ModTime(), + Atime: time.Unix(int64(stat.Atimespec.Sec), int64(stat.Atimespec.Nsec)), + Ctime: time.Unix(int64(stat.Ctimespec.Sec), int64(stat.Ctimespec.Nsec)), + } + return times +} diff --git a/vendor/github.com/otiai10/copy/stat_times_js.go b/vendor/github.com/otiai10/copy/stat_times_js.go new file mode 100644 index 000000000000..a4b1e288f4af --- /dev/null +++ b/vendor/github.com/otiai10/copy/stat_times_js.go @@ -0,0 +1,19 @@ +//go:build js + +package copy + +import ( + "os" + "syscall" + "time" +) + +func getTimeSpec(info os.FileInfo) timespec { + stat := info.Sys().(*syscall.Stat_t) + times := timespec{ + Mtime: info.ModTime(), + Atime: time.Unix(int64(stat.Atime), int64(stat.AtimeNsec)), + Ctime: time.Unix(int64(stat.Ctime), int64(stat.CtimeNsec)), + } + return times +} diff --git a/vendor/github.com/otiai10/copy/stat_times_windows.go b/vendor/github.com/otiai10/copy/stat_times_windows.go new file mode 100644 index 000000000000..babfe7d9d9ef --- /dev/null +++ b/vendor/github.com/otiai10/copy/stat_times_windows.go @@ -0,0 +1,18 @@ +//go:build windows + +package copy + +import ( + "os" + "syscall" + "time" +) + +func getTimeSpec(info os.FileInfo) timespec { + stat := info.Sys().(*syscall.Win32FileAttributeData) + return timespec{ + Mtime: time.Unix(0, stat.LastWriteTime.Nanoseconds()), + Atime: time.Unix(0, stat.LastAccessTime.Nanoseconds()), + Ctime: time.Unix(0, stat.CreationTime.Nanoseconds()), + } +} diff --git a/vendor/github.com/otiai10/copy/stat_times_x.go b/vendor/github.com/otiai10/copy/stat_times_x.go new file mode 100644 index 000000000000..53da32e2ccaa --- /dev/null +++ b/vendor/github.com/otiai10/copy/stat_times_x.go @@ -0,0 +1,17 @@ +//go:build plan9 || netbsd + +package copy + +import ( + "os" +) + +// TODO: check plan9 netbsd in future +func getTimeSpec(info os.FileInfo) timespec { + times := timespec{ + Mtime: info.ModTime(), + Atime: info.ModTime(), + Ctime: info.ModTime(), + } + return times +} diff --git a/vendor/github.com/otiai10/copy/symlink_test_x.go b/vendor/github.com/otiai10/copy/symlink_test_x.go new file mode 100644 index 000000000000..1f6bb1f46c59 --- /dev/null +++ b/vendor/github.com/otiai10/copy/symlink_test_x.go @@ -0,0 +1,45 @@ +//go:build windows || plan9 || netbsd || aix || illumos || solaris || js + +package copy + +import ( + "os" + "testing" + + . 
"github.com/otiai10/mint" +) + +func TestOptions_OnSymlink(t *testing.T) { + opt := Options{OnSymlink: func(string) SymlinkAction { return Deep }} + err := Copy("test/data/case03", "test/data.copy/case03.deep", opt) + Expect(t, err).ToBe(nil) + info, err := os.Lstat("test/data.copy/case03.deep/case01") + Expect(t, err).ToBe(nil) + Expect(t, info.Mode()&os.ModeSymlink).ToBe(os.FileMode(0)) + + opt = Options{OnSymlink: func(string) SymlinkAction { return Shallow }} + err = Copy("test/data/case03", "test/data.copy/case03.shallow", opt) + Expect(t, err).ToBe(nil) + info, err = os.Lstat("test/data.copy/case03.shallow/case01") + Expect(t, err).ToBe(nil) + Expect(t, info.Mode()&os.ModeSymlink).Not().ToBe(os.FileMode(0)) + + opt = Options{OnSymlink: func(string) SymlinkAction { return Skip }} + err = Copy("test/data/case03", "test/data.copy/case03.skip", opt) + Expect(t, err).ToBe(nil) + _, err = os.Stat("test/data.copy/case03.skip/case01") + Expect(t, os.IsNotExist(err)).ToBe(true) + + err = Copy("test/data/case03", "test/data.copy/case03.default") + Expect(t, err).ToBe(nil) + info, err = os.Lstat("test/data.copy/case03.default/case01") + Expect(t, err).ToBe(nil) + Expect(t, info.Mode()&os.ModeSymlink).Not().ToBe(os.FileMode(0)) + + opt = Options{OnSymlink: nil} + err = Copy("test/data/case03", "test/data.copy/case03.not-specified", opt) + Expect(t, err).ToBe(nil) + info, err = os.Lstat("test/data.copy/case03.not-specified/case01") + Expect(t, err).ToBe(nil) + Expect(t, info.Mode()&os.ModeSymlink).Not().ToBe(os.FileMode(0)) +} diff --git a/vendor/github.com/otiai10/mint/.gitignore b/vendor/github.com/otiai10/mint/.gitignore new file mode 100644 index 000000000000..6ae51791efec --- /dev/null +++ b/vendor/github.com/otiai10/mint/.gitignore @@ -0,0 +1,2 @@ +coverage.txt +vendor diff --git a/vendor/github.com/otiai10/mint/LICENSE b/vendor/github.com/otiai10/mint/LICENSE new file mode 100644 index 000000000000..a5bad7fc46a4 --- /dev/null +++ b/vendor/github.com/otiai10/mint/LICENSE @@ -0,0 +1,7 @@ +Copyright 2017 otiai10 (Hiromu OCHIAI) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/otiai10/mint/README.md b/vendor/github.com/otiai10/mint/README.md new file mode 100644 index 000000000000..06caae03994c --- /dev/null +++ b/vendor/github.com/otiai10/mint/README.md @@ -0,0 +1,62 @@ +# mint + +[![Go](https://github.com/otiai10/mint/actions/workflows/go.yml/badge.svg)](https://github.com/otiai10/mint/actions/workflows/go.yml) +[![codecov](https://codecov.io/gh/otiai10/mint/branch/master/graph/badge.svg)](https://codecov.io/gh/otiai10/mint) +[![Go Report Card](https://goreportcard.com/badge/github.com/otiai10/mint)](https://goreportcard.com/report/github.com/otiai10/mint) +[![GoDoc](https://godoc.org/github.com/otiai10/mint?status.png)](https://godoc.org/github.com/otiai10/mint) +[![GitHub tag (latest SemVer)](https://img.shields.io/github/v/tag/otiai10/mint?sort=semver)](https://pkg.go.dev/github.com/otiai10/mint) +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fotiai10%2Fmint.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fotiai10%2Fmint?ref=badge_shield) + +The very minimum assertion for Go. + +```go +package your_test + +import ( + "testing" + "pkg/your" + . "github.com/otiai10/mint" +) + +func TestFoo(t *testing.T) { + + foo := your.Foo() + Expect(t, foo).ToBe(1234) + Expect(t, foo).TypeOf("int") + Expect(t, foo).Not().ToBe(nil) + Expect(t, func() { yourFunc() }).Exit(1) + + // If assertion failed, exit 1 with message. + Expect(t, foo).ToBe("foobarbuz") + + // You can run assertions without os.Exit + res := Expect(t, foo).Dry().ToBe("bar") + // res.OK() == false + + // You can omit repeated `t`. + m := mint.Blend(t) + m.Expect(foo).ToBe(1234) +} +``` + +# features + +- Simple syntax +- Loosely coupled +- Plain implementation + +# tests +``` +go test ./... +``` + +# use cases + +Projects bellow use `mint` + +- [github.com/otiai10/gosseract](https://github.com/otiai10/gosseract/blob/master/all_test.go) +- [github.com/otiai10/marmoset](https://github.com/otiai10/marmoset/blob/master/all_test.go#L168-L190) + + +## License +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fotiai10%2Fmint.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fotiai10%2Fmint?ref=badge_large) \ No newline at end of file diff --git a/vendor/github.com/otiai10/mint/because.go b/vendor/github.com/otiai10/mint/because.go new file mode 100644 index 000000000000..6d496cee7574 --- /dev/null +++ b/vendor/github.com/otiai10/mint/because.go @@ -0,0 +1,15 @@ +package mint + +import "testing" + +// Because is context printer. 
+func Because(t *testing.T, context string, wrapper func(*testing.T)) { + Log(" Because ", context, "\n") + wrapper(t) +} + +// When is an alternative of `Because` +func When(t *testing.T, context string, wrapper func(*testing.T)) { + Log(" When ", context, "\n") + wrapper(t) +} diff --git a/vendor/github.com/otiai10/mint/comparer.go b/vendor/github.com/otiai10/mint/comparer.go new file mode 100644 index 000000000000..d543eb6d52ac --- /dev/null +++ b/vendor/github.com/otiai10/mint/comparer.go @@ -0,0 +1,53 @@ +package mint + +import ( + "fmt" + "reflect" +) + +func getComparer(a, b interface{}, deeply bool) Comparer { + if deeply { + return deepComparer{} + } + switch reflect.ValueOf(a).Kind() { + case reflect.Slice: + return sliceComparer{} + case reflect.Map: + return mapComparer{} + } + if b == nil { + return nilComparer{} + } + return defaultComparer{} +} + +type Comparer interface { + Compare(a, b interface{}) bool +} + +type defaultComparer struct{} + +func (c defaultComparer) Compare(a, b interface{}) bool { + return a == b +} + +type deepComparer struct{} + +func (c deepComparer) Compare(a, b interface{}) bool { + return reflect.DeepEqual(a, b) +} + +type mapComparer struct { + deepComparer +} + +type sliceComparer struct { + deepComparer +} + +type nilComparer struct { +} + +func (c nilComparer) Compare(a, _ interface{}) bool { + return fmt.Sprintf("%v", a) == fmt.Sprintf("%v", nil) +} diff --git a/vendor/github.com/otiai10/mint/exit.go b/vendor/github.com/otiai10/mint/exit.go new file mode 100644 index 000000000000..fc64ac96362d --- /dev/null +++ b/vendor/github.com/otiai10/mint/exit.go @@ -0,0 +1,41 @@ +//go:build !freebsd +// +build !freebsd + +package mint + +// On "freebsd/FreeBSD-10.4-STABLE" OS image, +// Go installed by `pkg install` might NOT have `syscall.Mprotect` +// causing such error: "bou.ke/monkey/replace_unix.go:13:10: undefined: syscall.Mprotect". +// See https://www.freebsd.org/cgi/man.cgi?sektion=2&query=mprotect +// TODO: Fix the image for https://github.com/otiai10/gosseract/blob/master/test/runtimes/freebsd.Vagrantfile#L4 +/* + * "bou.ke/monkey" + */ // FIXME: Now I remove this library because of LICENSE problem +// See https://github.com/otiai10/copy/issues/12 as well + +// Exit ... +func (testee *Testee) Exit(expectedCode int) MintResult { + + panic("`mint.Testee.Exit` method is temporarily deprecated.") + + /* + fun, ok := testee.actual.(func()) + if !ok { + panic("mint error: Exit only can be called for func type value") + } + + var actualCode int + patch := monkey.Patch(os.Exit, func(code int) { + actualCode = code + }) + fun() + patch.Unpatch() + + testee.actual = actualCode + if judge(actualCode, expectedCode, testee.not, testee.deeply) { + return testee.result + } + testee.expected = expectedCode + return testee.failed(failExitCode) + */ +} diff --git a/vendor/github.com/otiai10/mint/exit_freebsd.go b/vendor/github.com/otiai10/mint/exit_freebsd.go new file mode 100644 index 000000000000..d5eed6cf07a4 --- /dev/null +++ b/vendor/github.com/otiai10/mint/exit_freebsd.go @@ -0,0 +1,10 @@ +//go:build freebsd +// +build freebsd + +package mint + +// Exit ... 
+func (testee *Testee) Exit(expectedCode int) MintResult { + panic("Exit method can NOT be used on FreeBSD, for now.") + return MintResult{ok: false} +} diff --git a/vendor/github.com/otiai10/mint/log.go b/vendor/github.com/otiai10/mint/log.go new file mode 100644 index 000000000000..6aa8f8dcfbb4 --- /dev/null +++ b/vendor/github.com/otiai10/mint/log.go @@ -0,0 +1,15 @@ +package mint + +import ( + "fmt" + "os" +) + +// Log only output if -v flag is given. +// This is because the standard "t.Testing.Log" method decorates +// its caller: runtime.Caller(3) automatically. +func Log(args ...interface{}) { + if isVerbose(os.Args) { + fmt.Print(args...) + } +} diff --git a/vendor/github.com/otiai10/mint/mint.go b/vendor/github.com/otiai10/mint/mint.go new file mode 100644 index 000000000000..a37e3c1d7b68 --- /dev/null +++ b/vendor/github.com/otiai10/mint/mint.go @@ -0,0 +1,86 @@ +package mint + +import ( + "os" + "testing" +) + +// Mint (mint.Mint) is wrapper for *testing.T +// blending testing type to omit repeated `t`. +type Mint struct { + t *testing.T +} + +var ( + failToBe = 0 + failType = 1 + failIn = 2 + failToMatch = 3 + failExitCode = 4 + scolds = map[int]string{ + failToBe: "%s:%d\n\tExpected %sto be\t`%+v`\n\tBut actual\t`%+v`", + failType: "%s:%d\n\tExpected %stype\t`%+v`\n\tBut actual\t`%T`", + failIn: "%s:%d\n\tExpected %sis in\t`%v`\n\tbut it's not", + failToMatch: "%s:%d\n\tExpected %v to match\t`%s`\n\tBut actual\t`%+v`", + failExitCode: "%s:%d\n\tExpected %sto exit with code `%d`\n\tBut actual\t`%d`", + } +) +var ( + redB = "\033[1;31m" + reset = "\033[0m" + colorize = map[string]func(string) string{ + "red": func(v string) string { + return redB + v + reset + }, + } +) + +// Blend provides (blended) *mint.Mint. +// You can save writing "t" repeatedly. +func Blend(t *testing.T) *Mint { + return &Mint{ + t, + } +} + +// Expect provides "*Testee". +// The blended mint is merely a proxy to instantiate testee. +func (m *Mint) Expect(actual interface{}) *Testee { + return expect(m.t, actual) +} + +// Expect provides "*mint.Testee". +// It has assertion methods such as "ToBe". +func Expect(t *testing.T, actual interface{}) *Testee { + return expect(t, actual) +} + +func expect(t *testing.T, actual interface{}) *Testee { + return &Testee{t: t, actual: actual, verbose: isVerbose(os.Args), result: MintResult{ok: true}} +} + +// Require provides "*mint.Testee", +// which stops execution of goroutine when the assertion failed. 
+func Require(t *testing.T, actual interface{}) *Testee { + return require(t, actual) +} + +func require(t *testing.T, actual interface{}) *Testee { + return &Testee{t: t, actual: actual, verbose: isVerbose(os.Args), required: true, result: MintResult{ok: true}} +} + +func isVerbose(flags []string) bool { + for _, f := range flags { + if f == "-test.v=true" { + return true + } + } + return false +} +func judge(a, b interface{}, not, deeply bool) bool { + comparer := getComparer(a, b, deeply) + if not { + return !comparer.Compare(a, b) + } + return comparer.Compare(a, b) +} diff --git a/vendor/github.com/otiai10/mint/mocks.go b/vendor/github.com/otiai10/mint/mocks.go new file mode 100644 index 000000000000..87feab49405b --- /dev/null +++ b/vendor/github.com/otiai10/mint/mocks.go @@ -0,0 +1,30 @@ +package mint + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +type HTTPClientMock struct { + HTTPError error + ResponseStatusCode int + ResponseBody string +} + +func (hcm *HTTPClientMock) Handle() (res *http.Response, err error, ok bool) { + if hcm.HTTPError != nil { + err = hcm.HTTPError + ok = true + } + res = new(http.Response) + if hcm.ResponseBody != "" { + res.Body = ioutil.NopCloser(bytes.NewBufferString(hcm.ResponseBody)) + ok = true + } + if hcm.ResponseStatusCode != 0 { + res.StatusCode = hcm.ResponseStatusCode + ok = true + } + return res, err, ok +} diff --git a/vendor/github.com/otiai10/mint/mquery/README.md b/vendor/github.com/otiai10/mint/mquery/README.md new file mode 100644 index 000000000000..4992930de4a0 --- /dev/null +++ b/vendor/github.com/otiai10/mint/mquery/README.md @@ -0,0 +1,31 @@ +mquery +=== + +```go +import mquery + +var m = map[string]interface{}{ + "foo": "bar", + "hoge": map[string]interface{}{ + "name": "otiai10", + }, + "fuga": map[int]map[string]interface{}{ + 0: {"greet": "Hello"}, + 1: {"greet": "こんにちは"}, + }, + "langs": []string{"Go", "JavaScript", "English"}, + "baz": nil, + "required": false, +} + +func main() { + fmt.Println( + Query(m, "foo"), // "bar" + Query(m, "hoge.name"), // "otiai10" + Query(m, "fuga.0.greet"), // "Hello" + Query(m, "langs.2"), // "English" + Query(m, "required"), // false + Query(m, "baz.biz"), // nil + ) +} +``` \ No newline at end of file diff --git a/vendor/github.com/otiai10/mint/mquery/mquery.go b/vendor/github.com/otiai10/mint/mquery/mquery.go new file mode 100644 index 000000000000..2a7ddbac39b9 --- /dev/null +++ b/vendor/github.com/otiai10/mint/mquery/mquery.go @@ -0,0 +1,72 @@ +package mquery + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +func Query(m interface{}, q string) interface{} { + return query(m, strings.Split(q, ".")) +} + +func query(m interface{}, qs []string) interface{} { + t := reflect.TypeOf(m) + switch t.Kind() { + case reflect.Map: + return queryMap(m, t, qs) + case reflect.Slice: + return querySlice(m, t, qs) + default: + return m + } +} + +func queryMap(m interface{}, t reflect.Type, qs []string) interface{} { + if len(qs) == 0 { + return m + } + val := reflect.ValueOf(m) + if val.IsZero() { + return nil + } + switch t.Key().Kind() { + case reflect.String: + val := reflect.ValueOf(m).MapIndex(reflect.ValueOf(qs[0])) + if !val.IsValid() { + return nil + } + return query(val.Interface(), qs[1:]) + case reflect.Int: + i, err := strconv.Atoi(qs[0]) + if err != nil { + return fmt.Errorf("cannot access map with keyword: %s: %v", qs[0], err) + } + val := reflect.ValueOf(m).MapIndex(reflect.ValueOf(i)) + if !val.IsValid() { + return nil + } + return query(val.Interface(), qs[1:]) + } + 
return nil +} + +func querySlice(m interface{}, t reflect.Type, qs []string) interface{} { + if len(qs) == 0 { + return m + } + v := reflect.ValueOf(m) + if v.Len() == 0 { + return nil + } + i, err := strconv.Atoi(qs[0]) + if err != nil { + return fmt.Errorf("cannot access slice with keyword: %s: %v", qs[0], err) + } + if v.Len() <= i { + return nil + } + next := v.Index(i).Interface() + return query(next, qs[1:]) +} diff --git a/vendor/github.com/otiai10/mint/result.go b/vendor/github.com/otiai10/mint/result.go new file mode 100644 index 000000000000..2ce38c0564e5 --- /dev/null +++ b/vendor/github.com/otiai10/mint/result.go @@ -0,0 +1,23 @@ +package mint + +// MintResult provide the results of assertion +// for `Dry` option. +type MintResult struct { + ok bool + message string +} + +// OK returns whether result is ok or not. +func (r MintResult) OK() bool { + return r.ok +} + +// NG is the opposite alias for OK(). +func (r MintResult) NG() bool { + return !r.ok +} + +// Message returns failure message. +func (r MintResult) Message() string { + return r.message +} diff --git a/vendor/github.com/otiai10/mint/testee.go b/vendor/github.com/otiai10/mint/testee.go new file mode 100644 index 000000000000..90537fc3b203 --- /dev/null +++ b/vendor/github.com/otiai10/mint/testee.go @@ -0,0 +1,145 @@ +package mint + +import ( + "fmt" + "path/filepath" + "reflect" + "regexp" + "runtime" + "testing" + + "github.com/otiai10/mint/mquery" +) + +// Testee is holder of interfaces which user want to assert +// and also has its result. +type Testee struct { + t *testing.T + actual interface{} + expected interface{} + dry bool + not bool + deeply bool + result MintResult + required bool + verbose bool + + // origin string // Only used when querying +} + +// Query queries the actual value with given query string. +func (testee *Testee) Query(query string) *Testee { + // testee.origin = fmt.Sprintf("%T", testee.actual) + testee.actual = mquery.Query(testee.actual, query) + return testee +} + +// ToBe can assert the testee to equal the parameter of this func. +// OS will exit with code 1, when the assertion fail. +// If you don't want to exit, see "Dry()". +func (testee *Testee) ToBe(expected interface{}) MintResult { + if judge(testee.actual, expected, testee.not, testee.deeply) { + return testee.result + } + testee.expected = expected + return testee.failed(failToBe) +} + +// Match can assert the testee to match with specified regular expression. +// It uses `regexp.MustCompile`, it's due to caller to make sure it's valid regexp. +// OS will exit with code 1, when the assertion fail. +// If you don't want to exit, see "Dry()". +func (testee *Testee) Match(expression string) MintResult { + exp := regexp.MustCompile(expression) + matched := exp.MatchString(fmt.Sprintf("%v", testee.actual)) + if judge(matched, true, testee.not, testee.deeply) { + return testee.result + } + testee.expected = expression + return testee.failed(failToMatch) +} + +// In can assert the testee is in given array. +func (testee *Testee) In(expecteds ...interface{}) MintResult { + for _, expected := range expecteds { + if judge(testee.actual, expected, testee.not, testee.deeply) { + return testee.result + } + } + testee.expected = expecteds + return testee.failed(failIn) +} + +// TypeOf can assert the type of testee to equal the parameter of this func. +// OS will exit with code 1, when the assertion fail. +// If you don't want to exit, see "Dry()". 
+func (testee *Testee) TypeOf(typeName string) MintResult { + if judge(reflect.TypeOf(testee.actual).String(), typeName, testee.not, testee.deeply) { + return testee.result + } + testee.expected = typeName + return testee.failed(failType) +} + +// Not makes following assertion conversed. +func (testee *Testee) Not() *Testee { + testee.not = true + return testee +} + +// Dry makes the testee NOT to call "Fail()". +// Use this if you want to fail test in a purpose. +func (testee *Testee) Dry() *Testee { + testee.dry = true + return testee +} + +// Deeply makes following assertions use `reflect.DeepEqual`. +// You had better use this to compare reference type objects. +func (testee *Testee) Deeply() *Testee { + testee.deeply = true + return testee +} + +func (testee *Testee) failed(failure int) MintResult { + message := testee.toText(failure) + testee.result.ok = false + testee.result.message = message + if !testee.dry { + fmt.Println(colorize["red"](message)) + if testee.required { + testee.t.FailNow() + } else { + testee.t.Fail() + } + } + return testee.result +} + +func (testee *Testee) toText(fail int) string { + not := "" + if testee.not { + not = "NOT " + } + _, file, line, _ := runtime.Caller(3) + // if testee.origin != "" { + // testee.origin = fmt.Sprintf("(queried from %s)", testee.origin) + // } + return fmt.Sprintf( + scolds[fail], + filepath.Base(file), line, + not, + testee.expected, + testee.actual, + ) +} + +// Log only output if -v flag is given. +// This is because the standard "t.Testing.Log" method decorates +// its caller: runtime.Caller(3) automatically. +func (testee *Testee) Log(args ...interface{}) { + if !testee.verbose { + return + } + fmt.Print(args...) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 075517e85c94..a68d32888472 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -403,6 +403,13 @@ github.com/opencontainers/selinux/pkg/pwalkdir # github.com/openebs/lvm-localpv v1.7.0 ## explicit; go 1.19 github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1 +# github.com/otiai10/copy v1.14.1 +## explicit; go 1.18 +github.com/otiai10/copy +# github.com/otiai10/mint v1.6.3 +## explicit; go 1.18 +github.com/otiai10/mint +github.com/otiai10/mint/mquery # github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml From 954b6771b666f8d5e8d39e43d63ded148774e8d1 Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Thu, 17 Jul 2025 08:35:33 +0000 Subject: [PATCH 05/19] fix devbox snapshotter config --- snapshots/devbox/plugin/plugin.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/snapshots/devbox/plugin/plugin.go b/snapshots/devbox/plugin/plugin.go index c88722f8e0e4..a1e15c68330e 100644 --- a/snapshots/devbox/plugin/plugin.go +++ b/snapshots/devbox/plugin/plugin.go @@ -32,7 +32,7 @@ type Config struct { RootPath string `toml:"root_path"` UpperdirLabel bool `toml:"upperdir_label"` SyncRemove bool `toml:"sync_remove"` - lvmVgName string `toml:"lvm_vg_name"` + LvmVgName string `toml:"lvm_vg_name"` // MountOptions are options used for the overlay mount (not used on bind mounts) MountOptions []string `toml:"mount_options"` @@ -68,11 +68,11 @@ func init() { oOpts = append(oOpts, devbox.WithMountOptions(config.MountOptions)) } - if config.lvmVgName == "" { + if config.LvmVgName == "" { // If no LVM VG name is provided, use the default. 
- config.lvmVgName = "devbox-lvm-vg" + config.LvmVgName = "devbox-lvm-vg" } - oOpts = append(oOpts, devbox.WithLvmVgName(config.lvmVgName)) + oOpts = append(oOpts, devbox.WithLvmVgName(config.LvmVgName)) ic.Meta.Exports[plugin.SnapshotterRootDir] = root return devbox.NewSnapshotter(root, oOpts...) From f6ec2e1fc71abf052e95e3c819c351b26a0819de Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Thu, 17 Jul 2025 08:47:27 +0000 Subject: [PATCH 06/19] reduce message print --- snapshots/devbox/devbox.go | 31 ++++++++++++++----------------- snapshots/devbox/storage/bolt.go | 7 ------- 2 files changed, 14 insertions(+), 24 deletions(-) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index 298126324bd3..cdd288056621 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -281,7 +281,7 @@ func (o *snapshotter) Usage(ctx context.Context, key string) (_ snapshots.Usage, } func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { - fmt.Println("Prepare called with key:", key, "parent:", parent, "opts:", opts) + log.G(ctx).Debug("Prepare called with key:", key, "parent:", parent, "opts:", opts) return o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts) } @@ -635,7 +635,7 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k } for label, value := range base.Labels { - fmt.Printf("Snapshot label: %s=%s\n", label, value) + log.G(ctx).WithFields(logrus.Fields{"label": label, "value": value}).Debug("Snapshot label") } contentId, idOk := base.Labels[devboxContentIDKey] @@ -658,17 +658,17 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k return fmt.Errorf("failed to create snapshot: %w", err) } - fmt.Println("Created snapshot:", s.ID) + log.G(ctx).Debug("Created snapshot:", s.ID) npath = filepath.Join(snapshotDir, s.ID) // use npath instead of path to avoid removing the directory before create - fmt.Println("Snapshot directory path:", npath) + log.G(ctx).Debug("Snapshot directory path:", npath) if idOk && limitOk { var notExistErr error lvName, notExistErr = storage.GetDevboxLvName(ctx, contentId) - fmt.Println("LVM logical volume name for content ID:", contentId, "is", lvName) + log.G(ctx).Debug("LVM logical volume name for content ID:", contentId, "is", lvName) if notExistErr == nil && lvName != "" { // mount point for the snapshot - fmt.Println("LVM logical volume name found for content ID:", contentId, "is", lvName) + log.G(ctx).Debug("LVM logical volume name found for content ID:", contentId, "is", lvName) var isMounted bool if isMounted, err = isMountPoint(npath); err != nil { return fmt.Errorf("failed to check if path is a mount point: %w", err) @@ -724,17 +724,17 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if err = cp.Copy(parent_upperdir, filepath.Join(td, "fs"), opt); err != nil { return fmt.Errorf("failed to copy parent upperdir to new snapshot upperdir: %w, from %s to %s", err, parent_upperdir, td) } - fmt.Println("Copied parent upperdir to new snapshot upperdir:", td) + log.G(ctx).Debug("Copied parent upperdir to new snapshot upperdir:", td) } - fmt.Println("Prepared LVM directory for snapshot:", td, "with logical volume name:", lvName) + log.G(ctx).Debug("Prepared LVM directory for snapshot:", td, "with logical volume name:", lvName) storage.SetDevboxContent(ctx, key, contentId, lvName, npath) if err != nil { return fmt.Errorf("failed to prepare LVM directory for snapshot: %w", err) } 
} else { td, err = o.prepareDirectory(ctx, snapshotDir, kind) - fmt.Println("Created temporary directory for snapshot:", td) + log.G(ctx).Debug("Created temporary directory for snapshot:", td) } if err != nil { @@ -759,23 +759,23 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if err != nil { return fmt.Errorf("failed to unmount LVM logical volume %s: %w", lvName, err) } - fmt.Println("Unmounted LVM logical volume:", lvName, "from temporary directory:", td) + log.G(ctx).Debug("Unmounted LVM logical volume:", lvName, "from temporary directory:", td) if err = os.MkdirAll(npath, 0755); err != nil { return fmt.Errorf("failed to create snapshot directory: %w", err) } path = npath - fmt.Println("Created snapshot directory:", path) + log.G(ctx).Debug("Created snapshot directory:", path) err = o.mountLvm(ctx, lvName, path) if err != nil { return fmt.Errorf("failed to mount LVM logical volume %s: %w", lvName, err) } - fmt.Println("Mounted LVM logical volume:", lvName, "to snapshot directory:", path) + log.G(ctx).Debug("Mounted LVM logical volume:", lvName, "to snapshot directory:", path) } else { if err = os.Rename(td, npath); err != nil { return fmt.Errorf("failed to rename: %w", err) } path = npath - fmt.Println("Renamed temporary directory to snapshot directory:", path) + log.G(ctx).Debug("Renamed temporary directory to snapshot directory:", path) } td = "" @@ -826,8 +826,6 @@ func parseUseLimit(useLimit string) (string, error) { return "", fmt.Errorf("invalid use limit format: %s", useLimit) } - fmt.Println("Parsed use limit:", useLimit, "with multipliers:", multipliers) - capacity, err := strconv.Atoi(useLimit) if err != nil { return "", fmt.Errorf("failed to parse use limit %s: %w", useLimit, err) @@ -863,7 +861,6 @@ func (o *snapshotter) prepareLvmDirectory(ctx context.Context, snapshotDir strin if err != nil { return td, "", fmt.Errorf("failed to parse use limit %s: %w", useLimit, err) } - fmt.Println("Parsed use limit:", capacity) vol := &apis.LVMVolume{ ObjectMeta: metav1.ObjectMeta{ @@ -874,7 +871,7 @@ func (o *snapshotter) prepareLvmDirectory(ctx context.Context, snapshotDir strin VolGroup: o.lvmVgName, }, } - fmt.Println("Creating LVM volume:", lvName, "with capacity:", capacity, "in volume group:", o.lvmVgName) + log.G(ctx).Debug("Creating LVM volume:", lvName, "with capacity:", capacity, "in volume group:", o.lvmVgName) err = lvm.CreateVolume(vol) if err != nil { return td, "", fmt.Errorf("failed to create LVM logical volume %s: %w", lvName, err) diff --git a/snapshots/devbox/storage/bolt.go b/snapshots/devbox/storage/bolt.go index 0937039301c6..df3d476e103b 100644 --- a/snapshots/devbox/storage/bolt.go +++ b/snapshots/devbox/storage/bolt.go @@ -720,7 +720,6 @@ func GetDevboxLvName(ctx context.Context, contentKey string) (string, error) { // return fmt.Errorf("LVM name for content key %s not found: %w", contentKey, errdefs.ErrNotFound) // } lvName = string(lvNameByte) - fmt.Printf("lvName: %s\n", lvName) return nil }) if err != nil { @@ -740,7 +739,6 @@ func SetDevboxContent(ctx context.Context, key, contentID, lvName, path string) if bkt == nil { return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) } - fmt.Printf("devbox storage path bucket: %s\n", key) err := bkt.Put(DevboxKeyContentID, []byte(contentID)) if err != nil { return fmt.Errorf("failed to set content ID for key %s: %w", key, err) @@ -778,7 +776,6 @@ func SetDevboxContentStatusRemove(ctx context.Context, contentID string) error { if err := 
sdbkt.Put(DevboxKeyStatus, DevboxStatusRemoved); err != nil { return fmt.Errorf("failed to set status for content key %s: %w", contentID, err) } - fmt.Printf("Set devbox content status for key: %s, status: %s\n", contentID, DevboxStatusRemoved) return nil }) } @@ -787,7 +784,6 @@ func RemoveDevboxContent(ctx context.Context, Key string) (string, error) { var ( mountPath string ) - fmt.Printf("Removing devbox content for key: %s\n", Key) if Key == "" { return "", fmt.Errorf("content key cannot be empty") } @@ -801,7 +797,6 @@ func RemoveDevboxContent(ctx context.Context, Key string) (string, error) { return errdefs.ErrNotFound } contentID := sbkt.Get(DevboxKeyContentID) - fmt.Printf("devbox storage path bucket for key: %s, content ID: %s\n", Key, string(contentID)) mountPath = string(sbkt.Get(DevboxKeyPath)) if contentID == nil { // return fmt.Errorf("content ID for key %s not found: %w", Key, errdefs.ErrNotFound) @@ -820,7 +815,6 @@ func RemoveDevboxContent(ctx context.Context, Key string) (string, error) { if string(status) == string(DevboxStatusRemoved) { // remove the bucket if it is already marked as removed dbkt.DeleteBucket([]byte(contentID)) - fmt.Printf("Removed devbox content for key: %s, content ID: %s\n", Key, string(contentID)) } else { // if the status is not removed, only remove the mount path sdbkt.Delete([]byte(DevboxKeyPath)) @@ -831,7 +825,6 @@ func RemoveDevboxContent(ctx context.Context, Key string) (string, error) { if err != nil { return "", err } - fmt.Printf("Removed devbox content for key: %s, mount path: %s\n", Key, mountPath) return mountPath, nil } From e1a8aa2930cb14970852ce903e4dabbc96ed2f3f Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Fri, 18 Jul 2025 06:00:27 +0000 Subject: [PATCH 07/19] fix metadata --- snapshots/devbox/devbox.go | 24 ++++++++++++---- snapshots/devbox/lvm/lvm.go | 14 +++++---- snapshots/devbox/plugin/plugin.go | 3 ++ snapshots/devbox/storage/bolt.go | 47 ++++++++++++++++--------------- 4 files changed, 55 insertions(+), 33 deletions(-) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index cdd288056621..eb5d96e12a91 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -61,6 +61,7 @@ type SnapshotterConfig struct { UpperdirLabel bool ms MetaStore lvmVgName string // modified by sealos + ThinPoolName string mountOptions []string } @@ -94,6 +95,13 @@ func WithLvmVgName(name string) Opt { } } +func WithThinPoolName(name string) Opt { + return func(config *SnapshotterConfig) error { + config.ThinPoolName = name + return nil + } +} + // end modified by sealos // WithMountOptions defines the default mount options used for the overlay mount. @@ -126,6 +134,8 @@ type snapshotter struct { asyncRemove bool upperdirLabel bool lvmVgName string // modified by sealos + ThinPoolName string + UseThinPool bool options []string } @@ -182,6 +192,7 @@ func NewSnapshotter(root string, opts ...Opt) (snapshots.Snapshotter, error) { asyncRemove: config.AsyncRemove, upperdirLabel: config.UpperdirLabel, lvmVgName: config.lvmVgName, // modified by sealos + ThinPoolName: config.ThinPoolName, options: config.mountOptions, }, nil } @@ -226,7 +237,7 @@ func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath err = o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { if value, ok := info.Labels[removeContentIDKey]; ok { - storage.RemoveDevboxContent(ctx, value) + storage.SetDevboxContentStatusRemoved(ctx, value) } newInfo, err = storage.UpdateInfo(ctx, info, fieldpaths...) 
@@ -365,7 +376,7 @@ func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { return o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { // modified by sealos var mountPath string - mountPath, err = storage.RemoveDevboxContent(ctx, key) + mountPath, err = storage.RemoveDevbox(ctx, key) log.G(ctx).Infof("Removed devbox content for key: %s, mount path: %s", key, mountPath) if err != nil && err != errdefs.ErrNotFound { return fmt.Errorf("failed to remove devbox content for snapshot %s: %w", key, err) @@ -502,7 +513,7 @@ func (o *snapshotter) getCleanupLvNames(ctx context.Context) ([]string, error) { } // lvs := o.vgo.ListLVs() - lvs, err := lvm.ListLVMLogicalVolumeByVG(o.lvmVgName) + lvs, err := lvm.ListLVMLogicalVolumeByVG(o.lvmVgName, o.ThinPoolName) if err != nil { return nil, fmt.Errorf("failed to list LVM logical volumes: %w", err) } @@ -689,7 +700,7 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k // remove devbox metadata if new lv is created defer func() { if err != nil { - mountPath, err := storage.RemoveDevboxContent(ctx, key) + mountPath, err := storage.RemoveDevbox(ctx, key) if err != nil { log.G(ctx).WithError(err).Warnf("failed to remove devbox content for key %s", contentId) } @@ -867,8 +878,9 @@ func (o *snapshotter) prepareLvmDirectory(ctx context.Context, snapshotDir strin Name: lvName, }, Spec: apis.VolumeInfo{ - Capacity: capacity, - VolGroup: o.lvmVgName, + Capacity: capacity, + VolGroup: o.lvmVgName, + ThinProvision: o.ThinPoolName, }, } log.G(ctx).Debug("Creating LVM volume:", lvName, "with capacity:", capacity, "in volume group:", o.lvmVgName) diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go index e1a2726efd4c..062333bd8211 100644 --- a/snapshots/devbox/lvm/lvm.go +++ b/snapshots/devbox/lvm/lvm.go @@ -219,11 +219,12 @@ func buildLVMCreateArgs(vol *apis.LVMVolume) []string { volume := vol.Name size := vol.Spec.Capacity + "b" // thinpool name required for thinProvision volumes - pool := vol.Spec.VolGroup + "_thinpool" + // pool := vol.Spec.VolGroup + "_thinpool" + pool := vol.Spec.ThinProvision if len(vol.Spec.Capacity) != 0 { // check if thin pool exists for given volumegroup requested thin volume - if strings.TrimSpace(vol.Spec.ThinProvision) != YES { + if strings.TrimSpace(vol.Spec.ThinProvision) == "" { LVMVolArg = append(LVMVolArg, "-L", size) } else if !lvThinExists(vol.Spec.VolGroup, pool) { // thinpool size can't be equal or greater than actual volumegroup size @@ -233,7 +234,7 @@ func buildLVMCreateArgs(vol *apis.LVMVolume) []string { // command to create thinpool and thin volume if thinProvision is enabled // `lvcreate -L 1G -T lvmvg/mythinpool -V 1G -n thinvol` - if strings.TrimSpace(vol.Spec.ThinProvision) == YES { + if strings.TrimSpace(vol.Spec.ThinProvision) != "" { LVMVolArg = append(LVMVolArg, "-T", vol.Spec.VolGroup+"/"+pool, "-V", size) } @@ -241,7 +242,7 @@ func buildLVMCreateArgs(vol *apis.LVMVolume) []string { LVMVolArg = append(LVMVolArg, "-n", volume) } - if strings.TrimSpace(vol.Spec.ThinProvision) != YES { + if strings.TrimSpace(vol.Spec.ThinProvision) == "" { LVMVolArg = append(LVMVolArg, vol.Spec.VolGroup) } @@ -886,7 +887,7 @@ func ListLVMLogicalVolume() ([]LogicalVolume, error) { } // modified by sealos -func ListLVMLogicalVolumeByVG(vg string) ([]LogicalVolume, error) { +func ListLVMLogicalVolumeByVG(vg string, pool string) ([]LogicalVolume, error) { if err := ReloadLVMMetadataCache(); err != nil { return nil, err } @@ -897,6 +898,9 @@ func 
ListLVMLogicalVolumeByVG(vg string) ([]LogicalVolume, error) { "--units", "b", "--select", fmt.Sprintf("vg_name=%s", vg), } + if pool != "" { + args = append(args, "--select", fmt.Sprintf("pool_lv=%s", pool)) + } output, _, err := RunCommandSplit(LVList, args...) if err != nil { klog.Errorf("lvm: error while running command %s %v: %v", LVList, args, err) diff --git a/snapshots/devbox/plugin/plugin.go b/snapshots/devbox/plugin/plugin.go index a1e15c68330e..6764b70494e4 100644 --- a/snapshots/devbox/plugin/plugin.go +++ b/snapshots/devbox/plugin/plugin.go @@ -33,6 +33,7 @@ type Config struct { UpperdirLabel bool `toml:"upperdir_label"` SyncRemove bool `toml:"sync_remove"` LvmVgName string `toml:"lvm_vg_name"` + ThinPoolName string `toml:"thin_pool_name"` // MountOptions are options used for the overlay mount (not used on bind mounts) MountOptions []string `toml:"mount_options"` @@ -74,6 +75,8 @@ func init() { } oOpts = append(oOpts, devbox.WithLvmVgName(config.LvmVgName)) + oOpts = append(oOpts, devbox.WithThinPoolName(config.ThinPoolName)) + ic.Meta.Exports[plugin.SnapshotterRootDir] = root return devbox.NewSnapshotter(root, oOpts...) diff --git a/snapshots/devbox/storage/bolt.go b/snapshots/devbox/storage/bolt.go index df3d476e103b..b256efdf493a 100644 --- a/snapshots/devbox/storage/bolt.go +++ b/snapshots/devbox/storage/bolt.go @@ -735,26 +735,29 @@ func SetDevboxContent(ctx context.Context, key, contentID, lvName, path string) } return withDevboxBucket(ctx, func(ctx context.Context, bkt *bolt.Bucket, dbkt *bolt.Bucket) error { - bkt = bkt.Bucket([]byte(key)) - if bkt == nil { - return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return fmt.Errorf("snapshot key %s not found: %w", key, errdefs.ErrNotFound) } - err := bkt.Put(DevboxKeyContentID, []byte(contentID)) + err := sbkt.Put(DevboxKeyContentID, []byte(contentID)) if err != nil { return fmt.Errorf("failed to set content ID for key %s: %w", key, err) } + if err := sbkt.Put([]byte(DevboxKeyPath), []byte(path)); err != nil { + return fmt.Errorf("failed to set devbox path for content key %s: %w", contentID, err) + } if dbkt == nil { - return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) + return fmt.Errorf("devbox devbox path bucket does not exist: %w", errdefs.ErrNotFound) } sdbkt, err := dbkt.CreateBucketIfNotExists([]byte(contentID)) if err != nil { return fmt.Errorf("failed to create bucket for content key %s: %w", contentID, err) } if err := sdbkt.Put([]byte(DevboxKeyLvName), []byte(lvName)); err != nil { - return fmt.Errorf("failed to set storage path for content key %s: %w", contentID, err) + return fmt.Errorf("failed to set lvname for content key %s: %w", contentID, err) } if err := sdbkt.Put([]byte(DevboxKeyPath), []byte(path)); err != nil { - return fmt.Errorf("failed to set storage path for content key %s: %w", contentID, err) + return fmt.Errorf("failed to set devbox path for content key %s: %w", contentID, err) } if err := sdbkt.Put([]byte(DevboxKeyStatus), []byte(DevboxStatusActive)); err != nil { return fmt.Errorf("failed to set status for content key %s: %w", contentID, err) @@ -763,7 +766,7 @@ func SetDevboxContent(ctx context.Context, key, contentID, lvName, path string) }) } -func SetDevboxContentStatusRemove(ctx context.Context, contentID string) error { +func SetDevboxContentStatusRemoved(ctx context.Context, contentID string) error { if contentID == "" { return fmt.Errorf("content key cannot 
be empty") } @@ -780,7 +783,7 @@ func SetDevboxContentStatusRemove(ctx context.Context, contentID string) error { }) } -func RemoveDevboxContent(ctx context.Context, Key string) (string, error) { +func RemoveDevbox(ctx context.Context, Key string) (string, error) { var ( mountPath string ) @@ -798,15 +801,10 @@ func RemoveDevboxContent(ctx context.Context, Key string) (string, error) { } contentID := sbkt.Get(DevboxKeyContentID) mountPath = string(sbkt.Get(DevboxKeyPath)) - if contentID == nil { - // return fmt.Errorf("content ID for key %s not found: %w", Key, errdefs.ErrNotFound) - } - // if err := bkt.DeleteBucket([]byte(Key)); err != nil { - // if errors.Is(err, bolt.ErrBucketNotFound) { - // return fmt.Errorf("storage path for content key %s not found: %w", Key, errdefs.ErrNotFound) - // } - // return fmt.Errorf("failed to delete storage path for content key %s: %w", Key, err) - // } + if len(contentID) == 0 { + fmt.Printf("content ID for key %s is empty, continuing with snapshotter removal\n", Key) + return nil // if contentID is nil, continue with the snapshotter removal + } sdbkt := dbkt.Bucket([]byte(contentID)) if sdbkt == nil { return fmt.Errorf("devbox storage path bucket for content ID %s does not exist: %w", string(contentID), errdefs.ErrNotFound) @@ -832,18 +830,23 @@ func RemoveDevboxContent(ctx context.Context, Key string) (string, error) { func GetDevboxLvNames(ctx context.Context) (map[string]string, error) { m := map[string]string{} if err := withDevboxBucket(ctx, func(ctx context.Context, _ *bolt.Bucket, dbkt *bolt.Bucket) error { - return dbkt.ForEach(func(k, v []byte) error { + return dbkt.ForEachBucket(func(k []byte) error { // skip non buckets - if v != nil { + v := dbkt.Bucket(k) + if v == nil { return nil } - path := dbkt.Bucket(k).Get(DevboxKeyPath) - m[string(path)] = string(k) + lvName := v.Get(DevboxKeyLvName) + path := v.Get(DevboxKeyPath) + if len(lvName) > 0 { + m[string(lvName)] = string(path) + } return nil }) }); err != nil { return nil, err } + fmt.Printf("devbox lv names: %v\n", m) return m, nil } From 5e82f8eaf350f34d957337dcbea9f03c5bc0f7d4 Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Thu, 24 Jul 2025 08:57:33 +0000 Subject: [PATCH 08/19] add resize feature --- snapshots/devbox/devbox.go | 24 ++++++++++++++++++++++++ snapshots/devbox/lvm/lvm.go | 4 ++++ 2 files changed, 28 insertions(+) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index eb5d96e12a91..324335dfe928 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -532,6 +532,27 @@ func (o *snapshotter) getCleanupLvNames(ctx context.Context) ([]string, error) { return cleanup, nil } +func (o *snapshotter) resizeLVMVolume(lvName, useLimit string) error { + + capacity, err := parseUseLimit(useLimit) + if err != nil { + return fmt.Errorf("failed to parse use limit %s: %w", useLimit, err) + } + + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: lvName, + }, + Spec: apis.VolumeInfo{ + Capacity: capacity, + VolGroup: o.lvmVgName, + ThinProvision: o.ThinPoolName, + }, + } + + return lvm.ResizeLVMVolume(vol, false) +} + func isMountPoint(dir string) (bool, error) { // 读取 /proc/mounts 文件 data, err := os.ReadFile("/proc/mounts") @@ -686,6 +707,9 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k } else if isMounted { log.G(ctx).Infof("Path %s is already mounted, skipping mount", npath) } else { + if err = o.resizeLVMVolume(lvName, useLimit); err != nil { + return fmt.Errorf("failed to resize LVM logical 
volume %s: %w", lvName, err) + } // mount the LVM logical volume if err = o.mountLvm(ctx, lvName, npath); err != nil { return fmt.Errorf("failed to mount LVM logical volume %s: %w", lvName, err) diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go index 062333bd8211..e7374a0f1fc3 100644 --- a/snapshots/devbox/lvm/lvm.go +++ b/snapshots/devbox/lvm/lvm.go @@ -293,6 +293,10 @@ func CreateVolume(vol *apis.LVMVolume) error { } if volExists { klog.Infof("lvm: volume (%s) already exists, skipping its creation", volume) + err := ResizeLVMVolume(vol, false) + if err != nil { + return err + } return nil } From 059f1db8354ab01d5e8814db16319cb34f94b3ae Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Mon, 4 Aug 2025 08:19:13 +0000 Subject: [PATCH 09/19] fix no meta data while recreate --- pkg/cri/server/container_create_linux.go | 9 ++------- snapshots/devbox/devbox.go | 3 +++ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/pkg/cri/server/container_create_linux.go b/pkg/cri/server/container_create_linux.go index 5b484b74b378..1b9d1dafde97 100644 --- a/pkg/cri/server/container_create_linux.go +++ b/pkg/cri/server/container_create_linux.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io" + "maps" "os" "strconv" "strings" @@ -618,13 +619,7 @@ func devboxSnapshotterOpts(snapshotterName string, config *runtime.PodSandboxCon } // add container annotations to snapshot labels labels := make(map[string]string) - for k, v := range config.Annotations { - // if strings.HasPrefix(k, DevboxSnapshotLabelPrefix) { - labels[k] = v - fmt.Printf("devboxSnapshotterOpts: k=%s, v=%s\n", k, v) - // } - } - // labels["devbox.sealos.io/use-limit"] = "10Gi" + maps.Copy(labels, config.Annotations) return snapshots.WithLabels(labels), nil } diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index 324335dfe928..3e031fbe956c 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -710,10 +710,13 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if err = o.resizeLVMVolume(lvName, useLimit); err != nil { return fmt.Errorf("failed to resize LVM logical volume %s: %w", lvName, err) } + + storage.SetDevboxContent(ctx, key, contentId, lvName, npath) // mount the LVM logical volume if err = o.mountLvm(ctx, lvName, npath); err != nil { return fmt.Errorf("failed to mount LVM logical volume %s: %w", lvName, err) } + path = npath } // reuse of old lv, no need to prepare a new directory return nil From 540a6bc084e8f6724b06a1da7db5c25c6f432f57 Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Thu, 7 Aug 2025 06:07:34 +0000 Subject: [PATCH 10/19] remove lv mount path after stopping container --- client.go | 20 ++++ pkg/cri/server/container_stop.go | 17 +++ snapshots/devbox/devbox.go | 185 ++++++++++++++++++------------- snapshots/devbox/storage/bolt.go | 85 +++++++++++++- 4 files changed, 223 insertions(+), 84 deletions(-) diff --git a/client.go b/client.go index 69614712ec67..e201d74885e9 100644 --- a/client.go +++ b/client.go @@ -303,6 +303,21 @@ func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContain return containerFromRecord(c, r), nil } +func (c *Client) UpdateDevboxSnapshot(ctx context.Context, snapshotter string, id string, label string, value string) error { + fmt.Println("Check snapshotter:", snapshotter) + if snapshotter == "devbox" { + _, err := c.SnapshotService(snapshotter).Update(ctx, snapshots.Info{ + Name: id, + Labels: map[string]string{label: value}, + }, "labels."+label) + if err != nil { + return 
err + } + return nil + } + return fmt.Errorf("snapshotter %s is not supported for update: %w", snapshotter, errdefs.ErrNotImplemented) +} + // LoadContainer loads an existing container from metadata func (c *Client) LoadContainer(ctx context.Context, id string) (Container, error) { r, err := c.ContainerService().Get(ctx, id) @@ -621,15 +636,20 @@ func (c *Client) ContentStore() content.Store { // SnapshotService returns the underlying snapshotter for the provided snapshotter name func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter { + fmt.Println("Snapshotter name:", snapshotterName) snapshotterName, err := c.resolveSnapshotterName(context.Background(), snapshotterName) + fmt.Println("Resolved snapshotter name:", snapshotterName) if err != nil { snapshotterName = DefaultSnapshotter + fmt.Println("Using default snapshotter:", snapshotterName) } if c.snapshotters != nil { + fmt.Println("Using cached snapshotter:", snapshotterName) return c.snapshotters[snapshotterName] } c.connMu.Lock() defer c.connMu.Unlock() + fmt.Println("Creating new remote snapshotter:", snapshotterName) return snproxy.NewSnapshotter(snapshotsapi.NewSnapshotsClient(c.conn), snapshotterName) } diff --git a/pkg/cri/server/container_stop.go b/pkg/cri/server/container_stop.go index c61a3d6c220e..2208bcc1ca67 100644 --- a/pkg/cri/server/container_stop.go +++ b/pkg/cri/server/container_stop.go @@ -34,6 +34,8 @@ import ( "github.com/containerd/containerd/protobuf" ) +const unmountLvm = "containerd.io/snapshot/devbox-unmount-lvm" + // StopContainer stops a running container with a grace period (i.e., timeout). func (c *criService) StopContainer(ctx context.Context, r *runtime.StopContainerRequest) (*runtime.StopContainerResponse, error) { start := time.Now() @@ -73,6 +75,21 @@ func (c *criService) StopContainer(ctx context.Context, r *runtime.StopContainer containerStopTimer.WithValues(i.Runtime.Name).UpdateSince(start) + ociRuntime, err := c.getSandboxRuntime(&runtime.PodSandboxConfig{}, sandbox.Metadata.RuntimeHandler) + + if err != nil { + return nil, fmt.Errorf("failed to get sandbox runtime: %w", err) + } + + snapshotter := c.runtimeSnapshotter(ctx, ociRuntime) + + fmt.Println("Check snapshotter:", snapshotter) + + err = c.client.UpdateDevboxSnapshot(ctx, snapshotter, i.ID, unmountLvm, "true") + if err != nil { + fmt.Println("Failed to update devbox snapshot:", err) + } + return &runtime.StopContainerResponse{}, nil } diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index 3e031fbe956c..f56d3f510c36 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -54,6 +54,7 @@ const newLayerLimitKey = "containerd.io/snapshot/devbox-storage-limit" const devboxContentIDKey = "containerd.io/snapshot/devbox-content-id" const privateImageKey = "containerd.io/snapshot/devbox-init" const removeContentIDKey = "containerd.io/snapshot/devbox-remove-content-id" +const unmountLvm = "containerd.io/snapshot/devbox-unmount-lvm" // SnapshotterConfig is used to configure the overlay snapshotter instance type SnapshotterConfig struct { @@ -128,7 +129,7 @@ func WithMetaStore(ms MetaStore) Opt { } } -type snapshotter struct { +type Snapshotter struct { root string ms MetaStore asyncRemove bool @@ -186,7 +187,7 @@ func NewSnapshotter(root string, opts ...Opt) (snapshots.Snapshotter, error) { config.mountOptions = append(config.mountOptions, "index=off") } - return &snapshotter{ + return &Snapshotter{ root: root, ms: config.ms, asyncRemove: config.AsyncRemove, @@ -215,7 +216,7 @@ func 
hasOption(options []string, key string, hasValue bool) bool { // // Should be used for parent resolution, existence checks and to discern // the kind of snapshot. -func (o *snapshotter) Stat(ctx context.Context, key string) (info snapshots.Info, err error) { +func (o *Snapshotter) Stat(ctx context.Context, key string) (info snapshots.Info, err error) { var id string if err := o.ms.WithTransaction(ctx, false, func(ctx context.Context) error { id, info, _, err = storage.GetInfo(ctx, key) @@ -233,11 +234,19 @@ func (o *snapshotter) Stat(ctx context.Context, key string) (info snapshots.Info return info, nil } -func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (newInfo snapshots.Info, err error) { +func (o *Snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (newInfo snapshots.Info, err error) { err = o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { + if value, ok := info.Labels[unmountLvm]; ok && value == "true" { + mountPath, err := storage.SetUnmountedWithKey(ctx, info.Name) + if err != nil { + return fmt.Errorf("failed to set devbox content status to unmounted: %w", err) + } + return o.unmountLvm(ctx, mountPath) + } + if value, ok := info.Labels[removeContentIDKey]; ok { - storage.SetDevboxContentStatusRemoved(ctx, value) + return storage.SetDevboxContentStatusRemoved(ctx, value) } newInfo, err = storage.UpdateInfo(ctx, info, fieldpaths...) @@ -266,7 +275,7 @@ func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath // "upper") directory and may take some time. // // For committed snapshots, the value is returned from the metadata database. -func (o *snapshotter) Usage(ctx context.Context, key string) (_ snapshots.Usage, err error) { +func (o *Snapshotter) Usage(ctx context.Context, key string) (_ snapshots.Usage, err error) { var ( usage snapshots.Usage info snapshots.Info @@ -291,12 +300,12 @@ func (o *snapshotter) Usage(ctx context.Context, key string) (_ snapshots.Usage, return usage, nil } -func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { +func (o *Snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { log.G(ctx).Debug("Prepare called with key:", key, "parent:", parent, "opts:", opts) return o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts) } -func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { +func (o *Snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { return o.createSnapshot(ctx, snapshots.KindView, key, parent, opts) } @@ -304,9 +313,28 @@ func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snap // called on an read-write or readonly transaction. // // This can be used to recover mounts after calling View or Prepare. 
-func (o *snapshotter) Mounts(ctx context.Context, key string) (_ []mount.Mount, err error) { +func (o *Snapshotter) Mounts(ctx context.Context, key string) (_ []mount.Mount, err error) { var s storage.Snapshot if err := o.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + var ( + contentID string + path string + ) + + contentID, path, err = storage.GetSnapshotDevboxInfo(ctx, key) + if err != nil { + return fmt.Errorf("failed to get devbox content ID for snapshot %s: %w", key, err) + } + if contentID != "" { + lvName, err := storage.GetDevboxLvName(ctx, contentID, path) + if err != nil { + return fmt.Errorf("failed to get devbox logical volume name for content ID %s: %w", contentID, err) + } + if lvName == "" { + return fmt.Errorf("logical volume name for content ID %s is empty", contentID) + } + } + s, err = storage.GetSnapshot(ctx, key) if err != nil { return fmt.Errorf("failed to get active mount: %w", err) @@ -318,7 +346,7 @@ func (o *snapshotter) Mounts(ctx context.Context, key string) (_ []mount.Mount, return o.mounts(s), nil } -func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { +func (o *Snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { return o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { // grab the existing id id, _, _, err := storage.GetInfo(ctx, key) @@ -341,7 +369,7 @@ func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap // Remove abandons the snapshot identified by key. The snapshot will // immediately become unavailable and unrecoverable. Disk space will // be freed up on the next call to `Cleanup`. -func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { +func (o *Snapshotter) Remove(ctx context.Context, key string) (err error) { var ( removals []string removedLvNames []string @@ -351,30 +379,30 @@ func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { // Remove directories after the transaction is closed, failures must not // return error since the transaction is committed with the removal // key no longer available. 
- defer func() { - if err == nil { - for _, dir := range removals { - // modified by sealos - if err1 := o.unmountLvm(ctx, dir); err1 != nil { - log.G(ctx).WithError(err1).WithField("path", dir).Warn("failed to unmount directory") - } - // end modified by sealos - if err1 := os.RemoveAll(dir); err1 != nil { - log.G(ctx).WithError(err1).WithField("path", dir).Warn("failed to remove directory") + return o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { + // modified by sealos + defer func() { + if err == nil { + for _, dir := range removals { + // modified by sealos + if err1 := o.unmountLvm(ctx, dir); err1 != nil { + log.G(ctx).WithError(err1).WithField("path", dir).Warn("failed to unmount directory") + } + // end modified by sealos + if err1 := os.RemoveAll(dir); err1 != nil { + log.G(ctx).WithError(err1).WithField("path", dir).Warn("failed to remove directory") + } } - } - for _, lvName := range removedLvNames { - err := o.removeLv(lvName) - if err != nil { - log.G(ctx).WithError(err).WithField("lvName", lvName).Warn("failed to destroy LVM logical volume") - continue + for _, lvName := range removedLvNames { + err := o.removeLv(lvName) + if err != nil { + log.G(ctx).WithError(err).WithField("lvName", lvName).Warn("failed to destroy LVM logical volume") + continue + } + log.G(ctx).Infof("LVM logical volume %s removed successfully", lvName) } - log.G(ctx).Infof("LVM logical volume %s removed successfully", lvName) } - } - }() - return o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { - // modified by sealos + }() var mountPath string mountPath, err = storage.RemoveDevbox(ctx, key) log.G(ctx).Infof("Removed devbox content for key: %s, mount path: %s", key, mountPath) @@ -406,7 +434,7 @@ func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { } // Walk the snapshots. 
-func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error { +func (o *Snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error { return o.ms.WithTransaction(ctx, false, func(ctx context.Context) error { if o.upperdirLabel { return storage.WalkInfo(ctx, func(ctx context.Context, info snapshots.Info) error { @@ -426,57 +454,54 @@ func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...str } // Cleanup cleans up disk resources from removed or abandoned snapshots -func (o *snapshotter) Cleanup(ctx context.Context) error { +func (o *Snapshotter) Cleanup(ctx context.Context) error { log.G(ctx).Infof("Cleanup called") - cleanup, cleanupLv, err := o.cleanupDirectories(ctx) - if err != nil { - return err - } - - for _, dir := range cleanup { - // modified by sealos - if err := o.unmountLvm(ctx, dir); err != nil { - log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to unmount directory") + return o.ms.WithTransaction(ctx, false, func(ctx context.Context) error { + cleanup, cleanupLv, err := o.cleanupDirectories(ctx) + if err != nil { + return err } - // end modified by sealos - if err := os.RemoveAll(dir); err != nil { - log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory") + + for _, dir := range cleanup { + // modified by sealos + if err := o.unmountLvm(ctx, dir); err != nil { + log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to unmount directory") + } + // end modified by sealos + if err := os.RemoveAll(dir); err != nil { + log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory") + } } - } - for _, lvName := range cleanupLv { - err := o.removeLv(lvName) - if err != nil { - log.G(ctx).WithError(err).WithField("lvName", lvName).Warn("failed to destroy LVM logical volume") - continue + for _, lvName := range cleanupLv { + err := o.removeLv(lvName) + if err != nil { + log.G(ctx).WithError(err).WithField("lvName", lvName).Warn("failed to destroy LVM logical volume") + continue + } + log.G(ctx).Infof("LVM logical volume %s removed successfully", lvName) } - log.G(ctx).Infof("LVM logical volume %s removed successfully", lvName) - } - return nil + return nil + }) } -func (o *snapshotter) cleanupDirectories(ctx context.Context) (_ []string, _ []string, err error) { +func (o *Snapshotter) cleanupDirectories(ctx context.Context) (_ []string, _ []string, err error) { var ( cleanupDirs []string removedLvNames []string ) // Get a write transaction to ensure no other write transaction can be entered // while the cleanup is scanning. 
- if err := o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { - cleanupDirs, err = o.getCleanupDirectories(ctx) - if err != nil { - return err - } - removedLvNames, err = o.getCleanupLvNames(ctx) - return err - }); err != nil { + cleanupDirs, err = o.getCleanupDirectories(ctx) + if err != nil { return nil, nil, err } - return cleanupDirs, removedLvNames, nil + removedLvNames, err = o.getCleanupLvNames(ctx) + return cleanupDirs, removedLvNames, err } -func (o *snapshotter) getCleanupDirectories(ctx context.Context) ([]string, error) { +func (o *Snapshotter) getCleanupDirectories(ctx context.Context) ([]string, error) { ids, err := storage.IDMap(ctx) if err != nil { return nil, err @@ -506,7 +531,7 @@ func (o *snapshotter) getCleanupDirectories(ctx context.Context) ([]string, erro } // modified by sealos -func (o *snapshotter) getCleanupLvNames(ctx context.Context) ([]string, error) { +func (o *Snapshotter) getCleanupLvNames(ctx context.Context) ([]string, error) { nameMap, err := storage.GetDevboxLvNames(ctx) if err != nil { return nil, err @@ -532,7 +557,7 @@ func (o *snapshotter) getCleanupLvNames(ctx context.Context) ([]string, error) { return cleanup, nil } -func (o *snapshotter) resizeLVMVolume(lvName, useLimit string) error { +func (o *Snapshotter) resizeLVMVolume(lvName, useLimit string) error { capacity, err := parseUseLimit(useLimit) if err != nil { @@ -581,7 +606,7 @@ func isMountPoint(dir string) (bool, error) { return false, nil } -func (o *snapshotter) mkfs(lvName string) error { +func (o *Snapshotter) mkfs(lvName string) error { devicePath := fmt.Sprintf("/dev/%s/%s", o.lvmVgName, lvName) // Check if the device exists if _, err := os.Stat(devicePath); os.IsNotExist(err) { @@ -596,7 +621,7 @@ func (o *snapshotter) mkfs(lvName string) error { return nil } -func (o *snapshotter) mountLvm(ctx context.Context, lvName string, path string) error { +func (o *Snapshotter) mountLvm(ctx context.Context, lvName string, path string) error { _, err := os.Stat(path) if os.IsNotExist(err) { if err := os.MkdirAll(path, 0755); err != nil { @@ -613,7 +638,7 @@ func (o *snapshotter) mountLvm(ctx context.Context, lvName string, path string) return nil } -func (o *snapshotter) unmountLvm(ctx context.Context, path string) error { +func (o *Snapshotter) unmountLvm(ctx context.Context, path string) error { isMounted, err := isMountPoint(path) if err != nil { return fmt.Errorf("failed to check if path %s is a mount point: %w", path, err) @@ -631,7 +656,7 @@ func (o *snapshotter) unmountLvm(ctx context.Context, path string) error { // end modified by sealos -func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { +func (o *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { var ( s storage.Snapshot td, path, npath, lvName string @@ -696,7 +721,7 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k if idOk && limitOk { var notExistErr error - lvName, notExistErr = storage.GetDevboxLvName(ctx, contentId) + lvName, notExistErr = storage.GetDevboxLvName(ctx, contentId, "") log.G(ctx).Debug("LVM logical volume name for content ID:", contentId, "is", lvName) if notExistErr == nil && lvName != "" { // mount point for the snapshot @@ -825,7 +850,7 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k return o.mounts(s), nil } -func (o *snapshotter) 
prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) { +func (o *Snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) { td, err := os.MkdirTemp(snapshotDir, "new-") if err != nil { return "", fmt.Errorf("failed to create temp dir: %w", err) @@ -876,7 +901,7 @@ func parseUseLimit(useLimit string) (string, error) { } -func (o *snapshotter) removeLv(lvName string) error { +func (o *Snapshotter) removeLv(lvName string) error { vol := &apis.LVMVolume{ ObjectMeta: metav1.ObjectMeta{ Name: lvName, @@ -888,7 +913,7 @@ func (o *snapshotter) removeLv(lvName string) error { return lvm.DestroyVolume(vol) } -func (o *snapshotter) prepareLvmDirectory(ctx context.Context, snapshotDir string, contentKey string, useLimit string) (string, string, error) { +func (o *Snapshotter) prepareLvmDirectory(ctx context.Context, snapshotDir string, contentKey string, useLimit string) (string, string, error) { lvName := "devbox-" + contentKey td, err := os.MkdirTemp(snapshotDir, "new-") if err != nil { @@ -942,7 +967,7 @@ func (o *snapshotter) prepareLvmDirectory(ctx context.Context, snapshotDir strin return td, lvName, nil } -func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { +func (o *Snapshotter) mounts(s storage.Snapshot) []mount.Mount { if len(s.ParentIDs) == 0 { // if we only have one layer/no parents then just return a bind mount as overlay // will not work @@ -998,16 +1023,16 @@ func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { } -func (o *snapshotter) upperPath(id string) string { +func (o *Snapshotter) upperPath(id string) string { return filepath.Join(o.root, "snapshots", id, "fs") } -func (o *snapshotter) workPath(id string) string { +func (o *Snapshotter) workPath(id string) string { return filepath.Join(o.root, "snapshots", id, "work") } // Close closes the snapshotter -func (o *snapshotter) Close() error { +func (o *Snapshotter) Close() error { return o.ms.Close() } diff --git a/snapshots/devbox/storage/bolt.go b/snapshots/devbox/storage/bolt.go index b256efdf493a..86c18bc03007 100644 --- a/snapshots/devbox/storage/bolt.go +++ b/snapshots/devbox/storage/bolt.go @@ -133,7 +133,7 @@ func UpdateInfo(ctx context.Context, info snapshots.Info, fieldpaths ...string) case "labels": updated.Labels = info.Labels default: - return fmt.Errorf("cannot update %q field on snapshot %q: %w", path, info.Name, errdefs.ErrInvalidArgument) + return fmt.Errorf("cannot update %q field on snapshot ** %q: %w", path, info.Name, errdefs.ErrInvalidArgument) } } } else { @@ -693,7 +693,43 @@ func withDevboxBucket(ctx context.Context, fn func(context.Context, *bolt.Bucket return fn(ctx, bkt, dbkt) } -func GetDevboxLvName(ctx context.Context, contentKey string) (string, error) { +func GetSnapshotDevboxInfo(ctx context.Context, key string) (string, string, error) { + var ( + contentID string + path string + ) + + if key == "" { + return "", "", fmt.Errorf("key cannot be empty") + } + + err := withDevboxBucket(ctx, func(ctx context.Context, bkt *bolt.Bucket, dbkt *bolt.Bucket) error { + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return fmt.Errorf("snapshot key %s not found: %w", key, errdefs.ErrNotFound) + } + contentIDByte := sbkt.Get(DevboxKeyContentID) + if contentIDByte == nil { + return nil + } + contentID = string(contentIDByte) + + pathByte := sbkt.Get(DevboxKeyPath) + if pathByte == nil { + return nil + } + path = string(pathByte) + + return nil + }) + if err != nil { + return "", "", 
fmt.Errorf("failed to get devbox info for snapshot %s: %w", key, err) + } + + return contentID, path, nil +} + +func GetDevboxLvName(ctx context.Context, contentKey string, path string) (string, error) { var ( lvName string ) @@ -711,8 +747,8 @@ func GetDevboxLvName(ctx context.Context, contentKey string) (string, error) { return errdefs.ErrNotFound } - if mountPath := sdbkt.Get(DevboxKeyPath); mountPath != nil { - return fmt.Errorf("devbox lv is already mounted at %s: %w", string(mountPath), errdefs.ErrAlreadyExists) + if mountPath := sdbkt.Get(DevboxKeyPath); mountPath != nil && string(mountPath) != path { + return fmt.Errorf("devbox lv %s is already mounted at %s, check failed for path %s", contentKey, string(mountPath), path) } lvNameByte := sdbkt.Get(DevboxKeyLvName) @@ -783,6 +819,47 @@ func SetDevboxContentStatusRemoved(ctx context.Context, contentID string) error }) } +func SetUnmountedWithKey(ctx context.Context, key string) (string, error) { + var ( + mountPath string + ) + err := withDevboxBucket(ctx, func(ctx context.Context, bkt *bolt.Bucket, dbkt *bolt.Bucket) error { + if bkt == nil { + return fmt.Errorf("devbox storage path bucket does not exist: %w", errdefs.ErrNotFound) + } + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return fmt.Errorf("devbox storage path bucket for key %s does not exist: %w", key, errdefs.ErrNotFound) + } + mountPath = string(sbkt.Get(DevboxKeyPath)) + if mountPath == "" { + return fmt.Errorf("mount path for key %s is empty: %w", key, errdefs.ErrNotFound) + } + contentID := sbkt.Get(DevboxKeyContentID) + if len(contentID) == 0 { + return fmt.Errorf("content ID for key %s is empty: %w", key, errdefs.ErrNotFound) + } + sdbkt := dbkt.Bucket([]byte(contentID)) + if sdbkt == nil { + return fmt.Errorf("devbox storage path bucket for content ID %s does not exist: %w", string(contentID), errdefs.ErrNotFound) + } + if path := sdbkt.Get(DevboxKeyPath); path != nil { + if string(path) != mountPath { + return fmt.Errorf("new mount path for content ID %s does not match key %s, maybe it was mounted by other container", string(contentID), key) + } + if err := sdbkt.Delete(DevboxKeyPath); err != nil { + return fmt.Errorf("failed to delete mount path for content ID %s: %w", string(contentID), err) + } + return nil + } + return fmt.Errorf("mount path for content ID %s is not set, cannot set status to unmounted: %w", string(contentID), errdefs.ErrNotFound) + }) + if err != nil { + return "", err + } + return mountPath, nil +} + func RemoveDevbox(ctx context.Context, Key string) (string, error) { var ( mountPath string From 87e9cccc45d2259f9af754ecb6a367ebcb77f28a Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Thu, 7 Aug 2025 09:24:59 +0000 Subject: [PATCH 11/19] update lv mount path after exit --- pkg/cri/server/events.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pkg/cri/server/events.go b/pkg/cri/server/events.go index bf80752b0402..b927216873be 100644 --- a/pkg/cri/server/events.go +++ b/pkg/cri/server/events.go @@ -443,6 +443,17 @@ func handleContainerExit(ctx context.Context, e *eventtypes.TaskExit, cntr conta status.Pid = 0 status.FinishedAt = protobuf.FromTimestamp(e.ExitedAt).UnixNano() status.ExitCode = int32(e.ExitStatus) + container, err := c.client.ContainerService().Get(ctx, cntr.Container.ID()) + if err != nil { + return status, err + } + fmt.Println("Container snapshotter:", container.Snapshotter, "ID:", cntr.Container.ID()) + if container.Snapshotter == "devbox" { + err = c.client.UpdateDevboxSnapshot(ctx, 
container.Snapshotter, container.ID, unmountLvm, "true") + if err != nil { + logrus.WithError(err).Errorf("Failed to update devbox snapshot for container %s", cntr.Container.ID()) + } + } } // Unknown state can only transit to EXITED state, so we need From dc24ff92f47dc6effdafe095cd92935b3cb5926c Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Fri, 15 Aug 2025 03:11:52 +0000 Subject: [PATCH 12/19] add resizefs for resize volume --- snapshots/devbox/devbox.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index f56d3f510c36..eef2bd548f0f 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -575,7 +575,7 @@ func (o *Snapshotter) resizeLVMVolume(lvName, useLimit string) error { }, } - return lvm.ResizeLVMVolume(vol, false) + return lvm.ResizeLVMVolume(vol, true) } func isMountPoint(dir string) (bool, error) { From 1ffbad429b2ff85e647fbfa526492af282679d63 Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Tue, 19 Aug 2025 03:41:44 +0000 Subject: [PATCH 13/19] fix lvm resize --- snapshots/devbox/lvm/lvm.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go index e7374a0f1fc3..8924c935f8dd 100644 --- a/snapshots/devbox/lvm/lvm.go +++ b/snapshots/devbox/lvm/lvm.go @@ -409,23 +409,23 @@ func ResizeLVMVolume(vol *apis.LVMVolume, resizefs bool) error { // before exapnding LVM volume(If volume is already expanded then // it might be error prone). This also makes ResizeLVMVolume func // idempotent - if !resizefs { - desiredVolSize, err := strconv.ParseUint(vol.Spec.Capacity, 10, 64) - if err != nil { - return err - } + // if !resizefs { + desiredVolSize, err := strconv.ParseUint(vol.Spec.Capacity, 10, 64) + if err != nil { + return err + } - curVolSize, err := getLVSize(vol) - if err != nil { - return err - } + curVolSize, err := getLVSize(vol) + if err != nil { + return err + } - // Trigger resize only when desired volume size is greater than - // current volume size else return - if desiredVolSize <= curVolSize { - return nil - } + // Trigger resize only when desired volume size is greater than + // current volume size else return + if desiredVolSize <= curVolSize { + return nil } + // } volume := vol.Spec.VolGroup + "/" + vol.Name From 7129ed31690cafeaad74f5632ab015e63fe086aa Mon Sep 17 00:00:00 2001 From: luanshaotong Date: Wed, 20 Aug 2025 04:08:14 +0000 Subject: [PATCH 14/19] fix snapshot remove; remove logs --- snapshots/devbox/storage/bolt.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/snapshots/devbox/storage/bolt.go b/snapshots/devbox/storage/bolt.go index 86c18bc03007..c4a743066c1d 100644 --- a/snapshots/devbox/storage/bolt.go +++ b/snapshots/devbox/storage/bolt.go @@ -879,12 +879,13 @@ func RemoveDevbox(ctx context.Context, Key string) (string, error) { contentID := sbkt.Get(DevboxKeyContentID) mountPath = string(sbkt.Get(DevboxKeyPath)) if len(contentID) == 0 { - fmt.Printf("content ID for key %s is empty, continuing with snapshotter removal\n", Key) + // fmt.Printf("content ID for key %s is empty, continuing with snapshotter removal\n", Key) return nil // if contentID is nil, continue with the snapshotter removal } sdbkt := dbkt.Bucket([]byte(contentID)) if sdbkt == nil { - return fmt.Errorf("devbox storage path bucket for content ID %s does not exist: %w", string(contentID), errdefs.ErrNotFound) + // return fmt.Errorf("devbox storage path bucket for content 
ID %s does not exist: %w", string(contentID), errdefs.ErrNotFound) + return nil } if status := sdbkt.Get(DevboxKeyStatus); status != nil { if string(status) == string(DevboxStatusRemoved) { @@ -923,7 +924,7 @@ func GetDevboxLvNames(ctx context.Context) (map[string]string, error) { }); err != nil { return nil, err } - fmt.Printf("devbox lv names: %v\n", m) + // fmt.Printf("devbox lv names: %v\n", m) return m, nil } From 7426997332d1b02223b92c6f0443b8d7d9b21cb4 Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Thu, 9 Oct 2025 16:59:24 +0800 Subject: [PATCH 15/19] lvm error --- snapshots/devbox/lvm/lvm.go | 2 +- snapshots/devbox/lvm/lvm_test.go | 52 ++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 snapshots/devbox/lvm/lvm_test.go diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go index 8924c935f8dd..aa47f7bbfcde 100644 --- a/snapshots/devbox/lvm/lvm.go +++ b/snapshots/devbox/lvm/lvm.go @@ -280,7 +280,7 @@ func RunCommandSplit(command string, args ...string) ([]byte, []byte, error) { klog.Warningf("lvm: said into stderr: %s", error_output) } - return output, error_output, err + return output, error_output, err } // CreateVolume creates the lvm volume diff --git a/snapshots/devbox/lvm/lvm_test.go b/snapshots/devbox/lvm/lvm_test.go new file mode 100644 index 000000000000..18c959933566 --- /dev/null +++ b/snapshots/devbox/lvm/lvm_test.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package lvm
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+)
+
+// TestRunCommandSplitErrorInfo tests the RunCommandSplit function
+func TestRunCommandSplitErrorInfo(t *testing.T) {
+	// test1: multiline stderr
+	t.Run("multiline stderr", func(t *testing.T) {
+		_, stderr, err := RunCommandSplit("sh", "-c", "echo 'line1' >&2 && echo 'line2' >&2")
+
+		fmt.Println("error info:", err)
+		fmt.Println("stderr:", string(stderr))
+
+		// check if stderr contains newline
+		if !strings.Contains(string(stderr), "\n") {
+			t.Error("stderr should contain newline")
+		}
+
+		// check if newline is replaced with " | "
+		if err != nil && !strings.Contains(err.Error(), " | ") {
+			t.Error("newline should be replaced with ' | '")
+		}
+	})
+
+	// test2: single line stderr
+	t.Run("single line stderr", func(t *testing.T) {
+		_, stderr, err := RunCommandSplit("sh", "-c", "echo 'single error' >&2")
+
+		fmt.Println("error info:", err)
+		fmt.Println("stderr:", string(stderr))
+	})
+}
\ No newline at end of file

From afa381172a6135217471f7b85b3d2949eaef58d1 Mon Sep 17 00:00:00 2001
From: Cunzili <2965202581@qq.com>
Date: Thu, 16 Oct 2025 07:35:41 +0000
Subject: [PATCH 16/19] checkout snapshotter

---
 client.go                                | 17 +++++++----------
 pkg/cri/server/container_create.go       | 15 +++++++++------
 pkg/cri/server/container_create_linux.go |  3 ---
 pkg/cri/server/container_stop.go         |  9 ++++++---
 4 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/client.go b/client.go
index e201d74885e9..230f22b969a9 100644
--- a/client.go
+++ b/client.go
@@ -305,17 +305,14 @@ func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContain
 
 func (c *Client) UpdateDevboxSnapshot(ctx context.Context, snapshotter string, id string, label string, value string) error {
 	fmt.Println("Check snapshotter:", snapshotter)
-	if snapshotter == "devbox" {
-		_, err := c.SnapshotService(snapshotter).Update(ctx, snapshots.Info{
-			Name:   id,
-			Labels: map[string]string{label: value},
-		}, "labels."+label)
-		if err != nil {
-			return err
-		}
-		return nil
+	_, err := c.SnapshotService(snapshotter).Update(ctx, snapshots.Info{
+		Name:   id,
+		Labels: map[string]string{label: value},
+	}, "labels."+label)
+	if err != nil {
+		return err
 	}
-	return fmt.Errorf("snapshotter %s is not supported for update: %w", snapshotter, errdefs.ErrNotImplemented)
+	return nil
 }
 
 // LoadContainer loads an existing container from metadata
diff --git a/pkg/cri/server/container_create.go b/pkg/cri/server/container_create.go
index 2efd4fd0936b..52870a3bdac7 100644
--- a/pkg/cri/server/container_create.go
+++ b/pkg/cri/server/container_create.go
@@ -189,12 +189,15 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
 		return nil, err
 	}
 
-	devboxOpt, err := devboxSnapshotterOpts(c.runtimeSnapshotter(ctx, ociRuntime), r.GetSandboxConfig())
-	if err != nil {
-		return nil, err
-	}
-	if devboxOpt != nil {
-		sOpts = append(sOpts, devboxOpt)
+	// Check if the snapshotter is devbox and add the devbox snapshotter opts
+	if c.runtimeSnapshotter(ctx, ociRuntime) == "devbox" {
+		devboxOpt, err := devboxSnapshotterOpts(c.runtimeSnapshotter(ctx, ociRuntime), r.GetSandboxConfig())
+		if err != nil {
+			return nil, err
+		}
+		if devboxOpt != nil {
+			sOpts = append(sOpts, devboxOpt)
+		}
 	}
 
 	// Set snapshotter before any other options.
diff --git a/pkg/cri/server/container_create_linux.go b/pkg/cri/server/container_create_linux.go index 1b9d1dafde97..92cbb0144cd6 100644 --- a/pkg/cri/server/container_create_linux.go +++ b/pkg/cri/server/container_create_linux.go @@ -614,9 +614,6 @@ func generateUserString(username string, uid, gid *runtime.Int64Value) (string, // snapshotterOpts returns any Linux specific snapshotter options for the rootfs snapshot func devboxSnapshotterOpts(snapshotterName string, config *runtime.PodSandboxConfig) (snapshots.Opt, error) { fmt.Printf("devboxSnapshotterOpts: snapshotterName=%s, config=%+v\n", snapshotterName, config) - if snapshotterName != "devbox" { - return nil, nil - } // add container annotations to snapshot labels labels := make(map[string]string) maps.Copy(labels, config.Annotations) diff --git a/pkg/cri/server/container_stop.go b/pkg/cri/server/container_stop.go index 2208bcc1ca67..a8c9a8702265 100644 --- a/pkg/cri/server/container_stop.go +++ b/pkg/cri/server/container_stop.go @@ -85,9 +85,12 @@ func (c *criService) StopContainer(ctx context.Context, r *runtime.StopContainer fmt.Println("Check snapshotter:", snapshotter) - err = c.client.UpdateDevboxSnapshot(ctx, snapshotter, i.ID, unmountLvm, "true") - if err != nil { - fmt.Println("Failed to update devbox snapshot:", err) + // Check if the snapshotter is devbox and update the devbox snapshot + if snapshotter == "devbox" { + err = c.client.UpdateDevboxSnapshot(ctx, snapshotter, i.ID, unmountLvm, "true") + if err != nil { + fmt.Println("Failed to update devbox snapshot:", err) + } } return &runtime.StopContainerResponse{}, nil From 136dcb81bd2e9ee33bd642a81a6c221928523838 Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Fri, 24 Oct 2025 10:02:28 +0000 Subject: [PATCH 17/19] fix remove container error --- pkg/cri/server/container_create_linux.go | 2 +- snapshots/devbox/devbox.go | 10 ++-- snapshots/devbox/storage/bolt.go | 66 +++++++++++++++++++++--- 3 files changed, 67 insertions(+), 11 deletions(-) diff --git a/pkg/cri/server/container_create_linux.go b/pkg/cri/server/container_create_linux.go index 92cbb0144cd6..348e9aae6728 100644 --- a/pkg/cri/server/container_create_linux.go +++ b/pkg/cri/server/container_create_linux.go @@ -613,7 +613,7 @@ func generateUserString(username string, uid, gid *runtime.Int64Value) (string, // snapshotterOpts returns any Linux specific snapshotter options for the rootfs snapshot func devboxSnapshotterOpts(snapshotterName string, config *runtime.PodSandboxConfig) (snapshots.Opt, error) { - fmt.Printf("devboxSnapshotterOpts: snapshotterName=%s, config=%+v\n", snapshotterName, config) + // fmt.Printf("devboxSnapshotterOpts: snapshotterName=%s, config=%+v\n", snapshotterName, config) // add container annotations to snapshot labels labels := make(map[string]string) maps.Copy(labels, config.Annotations) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index eef2bd548f0f..87dcb340fb98 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -736,7 +736,10 @@ func (o *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k return fmt.Errorf("failed to resize LVM logical volume %s: %w", lvName, err) } - storage.SetDevboxContent(ctx, key, contentId, lvName, npath) + if err = storage.SetDevboxContent(ctx, key, contentId, lvName, npath, key); err != nil { + return fmt.Errorf("failed to set devbox content: %w", err) + } + // mount the LVM logical volume if err = o.mountLvm(ctx, lvName, npath); err != nil { return fmt.Errorf("failed to mount 
LVM logical volume %s: %w", lvName, err) @@ -791,9 +794,8 @@ func (o *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k } log.G(ctx).Debug("Prepared LVM directory for snapshot:", td, "with logical volume name:", lvName) - storage.SetDevboxContent(ctx, key, contentId, lvName, npath) - if err != nil { - return fmt.Errorf("failed to prepare LVM directory for snapshot: %w", err) + if err = storage.SetDevboxContent(ctx, key, contentId, lvName, npath, key); err != nil { + return fmt.Errorf("failed to set devbox content: %w", err) } } else { td, err = o.prepareDirectory(ctx, snapshotDir, kind) diff --git a/snapshots/devbox/storage/bolt.go b/snapshots/devbox/storage/bolt.go index c4a743066c1d..761c7b87501d 100644 --- a/snapshots/devbox/storage/bolt.go +++ b/snapshots/devbox/storage/bolt.go @@ -53,6 +53,7 @@ var ( DevboxKeyPath = []byte("path") DevboxKeyLvName = []byte("lv_name") DevboxKeyStatus = []byte("status") + DevboxKeyContainerID = []byte("container_id") DevboxStatusActive = []byte("active") DevboxStatusRemoved = []byte("removed") @@ -765,8 +766,51 @@ func GetDevboxLvName(ctx context.Context, contentKey string, path string) (strin return lvName, nil } -func SetDevboxContent(ctx context.Context, key, contentID, lvName, path string) error { - if contentID == "" || lvName == "" || path == "" { +// GetDevboxContentData returns the data from the contentID bucket +func GetDevboxContentData(ctx context.Context, contentID string) (string, string, string, string, error) { + var ( + lvName string + path string + status string + containerID string + ) + + if contentID == "" { + return "", "", "", "", fmt.Errorf("contentID cannot be empty") + } + + err := withDevboxBucket(ctx, func(ctx context.Context, bkt *bolt.Bucket, dbkt *bolt.Bucket) error { + if dbkt == nil { + return fmt.Errorf("devbox contentID bucket does not exist: %w", errdefs.ErrNotFound) + } + sdbkt := dbkt.Bucket([]byte(contentID)) + if sdbkt == nil { + return fmt.Errorf("bucket for contentID %s not found: %w", contentID, errdefs.ErrNotFound) + } + if containerIDByte := sdbkt.Get(DevboxKeyContainerID); containerIDByte != nil { + containerID = string(containerIDByte) + } + if lvNameByte := sdbkt.Get(DevboxKeyLvName); lvNameByte != nil { + lvName = string(lvNameByte) + } + if pathByte := sdbkt.Get(DevboxKeyPath); pathByte != nil { + path = string(pathByte) + } + if statusByte := sdbkt.Get(DevboxKeyStatus); statusByte != nil { + status = string(statusByte) + } + + return nil + }) + if err != nil { + return "", "", "", "", err + } + + return lvName, path, status, containerID, nil +} + +func SetDevboxContent(ctx context.Context, key, contentID, lvName, path, containerID string) error { + if contentID == "" || lvName == "" || path == "" || containerID == "" { return fmt.Errorf("content key and storage path cannot be empty") } @@ -775,8 +819,7 @@ func SetDevboxContent(ctx context.Context, key, contentID, lvName, path string) if sbkt == nil { return fmt.Errorf("snapshot key %s not found: %w", key, errdefs.ErrNotFound) } - err := sbkt.Put(DevboxKeyContentID, []byte(contentID)) - if err != nil { + if err := sbkt.Put(DevboxKeyContentID, []byte(contentID)); err != nil { return fmt.Errorf("failed to set content ID for key %s: %w", key, err) } if err := sbkt.Put([]byte(DevboxKeyPath), []byte(path)); err != nil { @@ -795,6 +838,9 @@ func SetDevboxContent(ctx context.Context, key, contentID, lvName, path string) if err := sdbkt.Put([]byte(DevboxKeyPath), []byte(path)); err != nil { return fmt.Errorf("failed to set devbox path for 
content key %s: %w", contentID, err) } + if err := sdbkt.Put([]byte(DevboxKeyContainerID), []byte(containerID)); err != nil { + return fmt.Errorf("failed to set container ID for content key %s: %w", contentID, err) + } if err := sdbkt.Put([]byte(DevboxKeyStatus), []byte(DevboxStatusActive)); err != nil { return fmt.Errorf("failed to set status for content key %s: %w", contentID, err) } @@ -892,8 +938,16 @@ func RemoveDevbox(ctx context.Context, Key string) (string, error) { // remove the bucket if it is already marked as removed dbkt.DeleteBucket([]byte(contentID)) } else { - // if the status is not removed, only remove the mount path - sdbkt.Delete([]byte(DevboxKeyPath)) + // if the container ID is the same as the key, prove that the container is using this devbox + var containerID []byte + if containerID = sdbkt.Get(DevboxKeyContainerID); containerID != nil && string(containerID) == Key { + // if the status is not removed, only remove the mount path + // pathBeforeDelete := string(sdbkt.Get(DevboxKeyPath)) + sdbkt.Delete([]byte(DevboxKeyPath)) + // fmt.Printf("=======RemoveDevbox: Remove mount path for content ID:%s, mount path:%s, container ID:%s, key:%s\n", string(contentID), pathBeforeDelete, string(containerID), Key) + } + // if the container ID is not the same as the key, do not delete path + // fmt.Printf("=======RemoveDevbox: Do not remove mount path for content ID:%s, container ID:%s, key:%s\n", string(contentID), string(containerID), Key) } } return nil From c565182530f6b210ca48e80c8e3f8baac0252391 Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Mon, 27 Oct 2025 08:19:57 +0000 Subject: [PATCH 18/19] delete path key and add snapshot key --- snapshots/devbox/devbox.go | 9 ++-- snapshots/devbox/storage/bolt.go | 86 +++++--------------------------- 2 files changed, 17 insertions(+), 78 deletions(-) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index 87dcb340fb98..522620cf6f56 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -318,15 +318,14 @@ func (o *Snapshotter) Mounts(ctx context.Context, key string) (_ []mount.Mount, if err := o.ms.WithTransaction(ctx, false, func(ctx context.Context) error { var ( contentID string - path string ) - contentID, path, err = storage.GetSnapshotDevboxInfo(ctx, key) + contentID, _, err = storage.GetSnapshotDevboxInfo(ctx, key) if err != nil { return fmt.Errorf("failed to get devbox content ID for snapshot %s: %w", key, err) } if contentID != "" { - lvName, err := storage.GetDevboxLvName(ctx, contentID, path) + lvName, err := storage.GetDevboxLvName(ctx, contentID, key) if err != nil { return fmt.Errorf("failed to get devbox logical volume name for content ID %s: %w", contentID, err) } @@ -736,7 +735,7 @@ func (o *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k return fmt.Errorf("failed to resize LVM logical volume %s: %w", lvName, err) } - if err = storage.SetDevboxContent(ctx, key, contentId, lvName, npath, key); err != nil { + if err = storage.SetDevboxContent(ctx, key, contentId, lvName, npath); err != nil { return fmt.Errorf("failed to set devbox content: %w", err) } @@ -794,7 +793,7 @@ func (o *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k } log.G(ctx).Debug("Prepared LVM directory for snapshot:", td, "with logical volume name:", lvName) - if err = storage.SetDevboxContent(ctx, key, contentId, lvName, npath, key); err != nil { + if err = storage.SetDevboxContent(ctx, key, contentId, lvName, npath); err != nil { return 
fmt.Errorf("failed to set devbox content: %w", err) } } else { diff --git a/snapshots/devbox/storage/bolt.go b/snapshots/devbox/storage/bolt.go index 761c7b87501d..641ee93afc3c 100644 --- a/snapshots/devbox/storage/bolt.go +++ b/snapshots/devbox/storage/bolt.go @@ -53,7 +53,7 @@ var ( DevboxKeyPath = []byte("path") DevboxKeyLvName = []byte("lv_name") DevboxKeyStatus = []byte("status") - DevboxKeyContainerID = []byte("container_id") + DevboxKeySnapshotKey = []byte("snapshot_key") DevboxStatusActive = []byte("active") DevboxStatusRemoved = []byte("removed") @@ -730,7 +730,7 @@ func GetSnapshotDevboxInfo(ctx context.Context, key string) (string, string, err return contentID, path, nil } -func GetDevboxLvName(ctx context.Context, contentKey string, path string) (string, error) { +func GetDevboxLvName(ctx context.Context, contentKey, key string) (string, error) { var ( lvName string ) @@ -748,8 +748,8 @@ func GetDevboxLvName(ctx context.Context, contentKey string, path string) (strin return errdefs.ErrNotFound } - if mountPath := sdbkt.Get(DevboxKeyPath); mountPath != nil && string(mountPath) != path { - return fmt.Errorf("devbox lv %s is already mounted at %s, check failed for path %s", contentKey, string(mountPath), path) + if snapshotKey := sdbkt.Get(DevboxKeySnapshotKey); snapshotKey != nil && string(snapshotKey) != key { + return fmt.Errorf("snapshot key for content key %s is already set to %s", contentKey, string(snapshotKey)) } lvNameByte := sdbkt.Get(DevboxKeyLvName) @@ -766,52 +766,9 @@ func GetDevboxLvName(ctx context.Context, contentKey string, path string) (strin return lvName, nil } -// GetDevboxContentData returns the data from the contentID bucket -func GetDevboxContentData(ctx context.Context, contentID string) (string, string, string, string, error) { - var ( - lvName string - path string - status string - containerID string - ) - - if contentID == "" { - return "", "", "", "", fmt.Errorf("contentID cannot be empty") - } - - err := withDevboxBucket(ctx, func(ctx context.Context, bkt *bolt.Bucket, dbkt *bolt.Bucket) error { - if dbkt == nil { - return fmt.Errorf("devbox contentID bucket does not exist: %w", errdefs.ErrNotFound) - } - sdbkt := dbkt.Bucket([]byte(contentID)) - if sdbkt == nil { - return fmt.Errorf("bucket for contentID %s not found: %w", contentID, errdefs.ErrNotFound) - } - if containerIDByte := sdbkt.Get(DevboxKeyContainerID); containerIDByte != nil { - containerID = string(containerIDByte) - } - if lvNameByte := sdbkt.Get(DevboxKeyLvName); lvNameByte != nil { - lvName = string(lvNameByte) - } - if pathByte := sdbkt.Get(DevboxKeyPath); pathByte != nil { - path = string(pathByte) - } - if statusByte := sdbkt.Get(DevboxKeyStatus); statusByte != nil { - status = string(statusByte) - } - - return nil - }) - if err != nil { - return "", "", "", "", err - } - - return lvName, path, status, containerID, nil -} - -func SetDevboxContent(ctx context.Context, key, contentID, lvName, path, containerID string) error { - if contentID == "" || lvName == "" || path == "" || containerID == "" { - return fmt.Errorf("content key and storage path cannot be empty") +func SetDevboxContent(ctx context.Context, key, contentID, lvName, path string) error { + if contentID == "" || lvName == "" || path == "" { + return fmt.Errorf("content key and lv name cannot be empty") } return withDevboxBucket(ctx, func(ctx context.Context, bkt *bolt.Bucket, dbkt *bolt.Bucket) error { @@ -835,11 +792,8 @@ func SetDevboxContent(ctx context.Context, key, contentID, lvName, path, contain if err := 
sdbkt.Put([]byte(DevboxKeyLvName), []byte(lvName)); err != nil { return fmt.Errorf("failed to set lvname for content key %s: %w", contentID, err) } - if err := sdbkt.Put([]byte(DevboxKeyPath), []byte(path)); err != nil { - return fmt.Errorf("failed to set devbox path for content key %s: %w", contentID, err) - } - if err := sdbkt.Put([]byte(DevboxKeyContainerID), []byte(containerID)); err != nil { - return fmt.Errorf("failed to set container ID for content key %s: %w", contentID, err) + if err := sdbkt.Put([]byte(DevboxKeySnapshotKey), []byte(key)); err != nil { + return fmt.Errorf("failed to set snapshot key for content key %s: %w", contentID, err) } if err := sdbkt.Put([]byte(DevboxKeyStatus), []byte(DevboxStatusActive)); err != nil { return fmt.Errorf("failed to set status for content key %s: %w", contentID, err) @@ -889,16 +843,13 @@ func SetUnmountedWithKey(ctx context.Context, key string) (string, error) { if sdbkt == nil { return fmt.Errorf("devbox storage path bucket for content ID %s does not exist: %w", string(contentID), errdefs.ErrNotFound) } - if path := sdbkt.Get(DevboxKeyPath); path != nil { - if string(path) != mountPath { - return fmt.Errorf("new mount path for content ID %s does not match key %s, maybe it was mounted by other container", string(contentID), key) - } - if err := sdbkt.Delete(DevboxKeyPath); err != nil { - return fmt.Errorf("failed to delete mount path for content ID %s: %w", string(contentID), err) + if snapshotKey := sdbkt.Get(DevboxKeySnapshotKey); snapshotKey != nil { + if err := sdbkt.Delete(DevboxKeySnapshotKey); err != nil { + return fmt.Errorf("failed to delete snapshot key for content ID %s: %w", string(contentID), err) } return nil } - return fmt.Errorf("mount path for content ID %s is not set, cannot set status to unmounted: %w", string(contentID), errdefs.ErrNotFound) + return fmt.Errorf("snapshot key for content ID %s is not set, cannot set status to unmounted: %w", string(contentID), errdefs.ErrNotFound) }) if err != nil { return "", err @@ -937,17 +888,6 @@ func RemoveDevbox(ctx context.Context, Key string) (string, error) { if string(status) == string(DevboxStatusRemoved) { // remove the bucket if it is already marked as removed dbkt.DeleteBucket([]byte(contentID)) - } else { - // if the container ID is the same as the key, prove that the container is using this devbox - var containerID []byte - if containerID = sdbkt.Get(DevboxKeyContainerID); containerID != nil && string(containerID) == Key { - // if the status is not removed, only remove the mount path - // pathBeforeDelete := string(sdbkt.Get(DevboxKeyPath)) - sdbkt.Delete([]byte(DevboxKeyPath)) - // fmt.Printf("=======RemoveDevbox: Remove mount path for content ID:%s, mount path:%s, container ID:%s, key:%s\n", string(contentID), pathBeforeDelete, string(containerID), Key) - } - // if the container ID is not the same as the key, do not delete path - // fmt.Printf("=======RemoveDevbox: Do not remove mount path for content ID:%s, container ID:%s, key:%s\n", string(contentID), string(containerID), Key) } } return nil From 5b1079531b88be559e773a732c92206f06ebbfe6 Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Wed, 29 Oct 2025 09:55:35 +0000 Subject: [PATCH 19/19] fix: restart node -> container create error --- snapshots/devbox/storage/bolt.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/snapshots/devbox/storage/bolt.go b/snapshots/devbox/storage/bolt.go index 641ee93afc3c..86652860b505 100644 --- a/snapshots/devbox/storage/bolt.go +++ 
b/snapshots/devbox/storage/bolt.go @@ -888,6 +888,11 @@ func RemoveDevbox(ctx context.Context, Key string) (string, error) { if string(status) == string(DevboxStatusRemoved) { // remove the bucket if it is already marked as removed dbkt.DeleteBucket([]byte(contentID)) + } else { + snapshotKey := sdbkt.Get(DevboxKeySnapshotKey) + if string(snapshotKey) == Key { + sdbkt.Delete(DevboxKeySnapshotKey) + } } } return nil
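
For reference, below is a minimal, self-contained sketch (not part of the patch series) of the bookkeeping the last two commits converge on: each content ID bucket records a snapshot_key instead of a mount path, removal only drops the whole bucket once the status is "removed", and otherwise clears the key only when the caller owns it. It assumes the metastore is backed by go.etcd.io/bbolt, as the bolt.Bucket types in the diffs suggest; the top-level bucket name, the removeDevbox helper, and the sample IDs ("content-1", "k1") are invented here purely so the example runs on its own.

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	bolt "go.etcd.io/bbolt"
)

var (
	bucketDevbox   = []byte("devbox")       // assumed top-level bucket name, for this example only
	keySnapshotKey = []byte("snapshot_key") // mirrors DevboxKeySnapshotKey
	keyStatus      = []byte("status")       // mirrors DevboxKeyStatus
	statusRemoved  = []byte("removed")      // mirrors DevboxStatusRemoved
)

// removeDevbox mirrors the shape of the final RemoveDevbox branch: drop the whole
// content bucket only once it is marked "removed"; otherwise clear the recorded
// snapshot key, and only when the caller's key is the one recorded there.
func removeDevbox(tx *bolt.Tx, contentID, key string) error {
	dbkt := tx.Bucket(bucketDevbox)
	if dbkt == nil {
		return nil
	}
	sdbkt := dbkt.Bucket([]byte(contentID))
	if sdbkt == nil {
		return nil
	}
	if string(sdbkt.Get(keyStatus)) == string(statusRemoved) {
		return dbkt.DeleteBucket([]byte(contentID))
	}
	if string(sdbkt.Get(keySnapshotKey)) == key {
		return sdbkt.Delete(keySnapshotKey)
	}
	return nil
}

func main() {
	dir, err := os.MkdirTemp("", "devbox-bolt-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	db, err := bolt.Open(filepath.Join(dir, "meta.db"), 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Seed one active content entry owned by snapshot key "k1", then remove it as "k1".
	if err := db.Update(func(tx *bolt.Tx) error {
		dbkt, err := tx.CreateBucketIfNotExists(bucketDevbox)
		if err != nil {
			return err
		}
		sdbkt, err := dbkt.CreateBucketIfNotExists([]byte("content-1"))
		if err != nil {
			return err
		}
		if err := sdbkt.Put(keySnapshotKey, []byte("k1")); err != nil {
			return err
		}
		if err := sdbkt.Put(keyStatus, []byte("active")); err != nil {
			return err
		}
		return removeDevbox(tx, "content-1", "k1")
	}); err != nil {
		log.Fatal(err)
	}

	// The content bucket survives (status is still "active"); only snapshot_key is gone,
	// so a later container create can claim the same content again.
	_ = db.View(func(tx *bolt.Tx) error {
		sdbkt := tx.Bucket(bucketDevbox).Bucket([]byte("content-1"))
		fmt.Printf("snapshot_key after removal: %q\n", sdbkt.Get(keySnapshotKey))
		return nil
	})
}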